From 8daa83a594a2e98f39d764422bfbdbc62c9efd44 Mon Sep 17 00:00:00 2001
From: Daniel Baumann
Date: Fri, 19 Apr 2024 19:20:00 +0200
Subject: Adding upstream version 2:4.20.0+dfsg.

Signed-off-by: Daniel Baumann
---
 python/samba/__init__.py | 400 +
 python/samba/auth_util.py | 34 +
 python/samba/colour.py | 175 +
 python/samba/common.py | 107 +
 python/samba/dbchecker.py | 2935 +++
 python/samba/descriptor.py | 723 ++
 python/samba/dnsresolver.py | 68 +
 python/samba/dnsserver.py | 405 +
 python/samba/domain_update.py | 573 ++
 python/samba/drs_utils.py | 456 +
 python/samba/emulate/__init__.py | 16 +
 python/samba/emulate/traffic.py | 2415 ++++++
 python/samba/emulate/traffic_packets.py | 973 +++
 python/samba/forest_update.py | 543 ++
 python/samba/functional_level.py | 83 +
 python/samba/getopt.py | 539 ++
 python/samba/gkdi.py | 397 +
 python/samba/gp/__init__.py | 17 +
 python/samba/gp/gp_centrify_crontab_ext.py | 135 +
 python/samba/gp/gp_centrify_sudoers_ext.py | 80 +
 python/samba/gp/gp_cert_auto_enroll_ext.py | 572 ++
 python/samba/gp/gp_chromium_ext.py | 473 ++
 python/samba/gp/gp_drive_maps_ext.py | 168 +
 python/samba/gp/gp_ext_loader.py | 59 +
 python/samba/gp/gp_firefox_ext.py | 219 +
 python/samba/gp/gp_firewalld_ext.py | 171 +
 python/samba/gp/gp_gnome_settings_ext.py | 418 +
 python/samba/gp/gp_msgs_ext.py | 96 +
 python/samba/gp/gp_scripts_ext.py | 187 +
 python/samba/gp/gp_sec_ext.py | 221 +
 python/samba/gp/gp_smb_conf_ext.py | 127 +
 python/samba/gp/gp_sudoers_ext.py | 116 +
 python/samba/gp/gpclass.py | 1312 +++
 python/samba/gp/util/logging.py | 112 +
 python/samba/gp/vgp_access_ext.py | 178 +
 python/samba/gp/vgp_files_ext.py | 140 +
 python/samba/gp/vgp_issue_ext.py | 90 +
 python/samba/gp/vgp_motd_ext.py | 90 +
 python/samba/gp/vgp_openssh_ext.py | 115 +
 python/samba/gp/vgp_startup_scripts_ext.py | 136 +
 python/samba/gp/vgp_sudoers_ext.py | 97 +
 python/samba/gp/vgp_symlink_ext.py | 76 +
 python/samba/gp_parse/__init__.py | 185 +
 python/samba/gp_parse/gp_aas.py | 25 +
 python/samba/gp_parse/gp_csv.py | 102 +
 python/samba/gp_parse/gp_inf.py | 378 +
 python/samba/gp_parse/gp_ini.py | 228 +
 python/samba/gp_parse/gp_pol.py | 151 +
 python/samba/graph.py | 820 ++
 python/samba/hostconfig.py | 81 +
 python/samba/idmap.py | 99 +
 python/samba/join.py | 1786 ++++
 python/samba/kcc/__init__.py | 2754 ++++++
 python/samba/kcc/debug.py | 61 +
 python/samba/kcc/graph.py | 859 ++
 python/samba/kcc/graph_utils.py | 343 +
 python/samba/kcc/kcc_utils.py | 2364 ++++++
 python/samba/kcc/ldif_import_export.py | 403 +
 python/samba/logger.py | 69 +
 python/samba/mdb_util.py | 43 +
 python/samba/ms_display_specifiers.py | 195 +
 python/samba/ms_forest_updates_markdown.py | 309 +
 python/samba/ms_schema.py | 337 +
 python/samba/ms_schema_markdown.py | 78 +
 python/samba/ndr.py | 153 +
 python/samba/netcmd/__init__.py | 396 +
 python/samba/netcmd/common.py | 161 +
 python/samba/netcmd/computer.py | 729 ++
 python/samba/netcmd/contact.py | 861 ++
 python/samba/netcmd/dbcheck.py | 193 +
 python/samba/netcmd/delegation.py | 689 ++
 python/samba/netcmd/dns.py | 1394 +++
 python/samba/netcmd/domain/__init__.py | 73 +
 python/samba/netcmd/domain/auth/__init__.py | 35 +
 python/samba/netcmd/domain/auth/policy.py | 685 ++
 python/samba/netcmd/domain/auth/silo.py | 402 +
 python/samba/netcmd/domain/auth/silo_member.py | 201 +
 python/samba/netcmd/domain/backup.py | 1256 +++
 python/samba/netcmd/domain/claim/__init__.py | 35 +
 python/samba/netcmd/domain/claim/claim_type.py | 361 +
 python/samba/netcmd/domain/claim/value_type.py | 105 +
 python/samba/netcmd/domain/classicupgrade.py | 189 +
 python/samba/netcmd/domain/common.py | 64 +
 python/samba/netcmd/domain/dcpromo.py | 90 +
 python/samba/netcmd/domain/demote.py | 335 +
 python/samba/netcmd/domain/functional_prep.py | 145 +
 python/samba/netcmd/domain/info.py | 58 +
 python/samba/netcmd/domain/join.py | 146 +
 python/samba/netcmd/domain/keytab.py | 55 +
 python/samba/netcmd/domain/leave.py | 59 +
 python/samba/netcmd/domain/level.py | 250 +
 python/samba/netcmd/domain/models/__init__.py | 32 +
 python/samba/netcmd/domain/models/auth_policy.py | 109 +
 python/samba/netcmd/domain/models/auth_silo.py | 104 +
 python/samba/netcmd/domain/models/claim_type.py | 58 +
 python/samba/netcmd/domain/models/exceptions.py | 64 +
 python/samba/netcmd/domain/models/fields.py | 507 ++
 python/samba/netcmd/domain/models/group.py | 42 +
 python/samba/netcmd/domain/models/model.py | 426 +
 python/samba/netcmd/domain/models/query.py | 81 +
 python/samba/netcmd/domain/models/schema.py | 124 +
 python/samba/netcmd/domain/models/site.py | 47 +
 python/samba/netcmd/domain/models/subnet.py | 45 +
 python/samba/netcmd/domain/models/user.py | 75 +
 python/samba/netcmd/domain/models/value_type.py | 96 +
 python/samba/netcmd/domain/passwordsettings.py | 316 +
 python/samba/netcmd/domain/provision.py | 405 +
 python/samba/netcmd/domain/samba3upgrade.py | 34 +
 python/samba/netcmd/domain/schemaupgrade.py | 350 +
 python/samba/netcmd/domain/tombstones.py | 116 +
 python/samba/netcmd/domain/trust.py | 2338 +++
 python/samba/netcmd/drs.py | 874 ++
 python/samba/netcmd/dsacl.py | 217 +
 python/samba/netcmd/encoders.py | 49 +
 python/samba/netcmd/forest.py | 167 +
 python/samba/netcmd/fsmo.py | 535 ++
 python/samba/netcmd/gpcommon.py | 55 +
 python/samba/netcmd/gpo.py | 4513 ++++++++++
 python/samba/netcmd/group.py | 1416 ++++
 python/samba/netcmd/ldapcmp.py | 984 +++
 python/samba/netcmd/main.py | 98 +
 python/samba/netcmd/nettime.py | 60 +
 python/samba/netcmd/ntacl.py | 503 +
 python/samba/netcmd/ou.py | 411 +
 python/samba/netcmd/processes.py | 142 +
 python/samba/netcmd/pso.py | 794 ++
 python/samba/netcmd/rodc.py | 163 +
 python/samba/netcmd/schema.py | 319 +
 python/samba/netcmd/shell.py | 74 +
 python/samba/netcmd/sites.py | 348 +
 python/samba/netcmd/spn.py | 210 +
 python/samba/netcmd/testparm.py | 236 +
 python/samba/netcmd/user/__init__.py | 70 +
 python/samba/netcmd/user/add.py | 209 +
 python/samba/netcmd/user/add_unix_attrs.py | 244 +
 python/samba/netcmd/user/auth/__init__.py | 35 +
 python/samba/netcmd/user/auth/policy.py | 170 +
 python/samba/netcmd/user/auth/silo.py | 189 +
 python/samba/netcmd/user/delete.py | 87 +
 python/samba/netcmd/user/disable.py | 64 +
 python/samba/netcmd/user/edit.py | 136 +
 python/samba/netcmd/user/enable.py | 94 +
 python/samba/netcmd/user/getgroups.py | 120 +
 python/samba/netcmd/user/list.py | 108 +
 python/samba/netcmd/user/move.py | 106 +
 python/samba/netcmd/user/password.py | 73 +
 python/samba/netcmd/user/readpasswords/__init__.py | 25 +
 python/samba/netcmd/user/readpasswords/common.py | 907 ++
 .../user/readpasswords/get_kerberos_ticket.py | 146 +
 .../samba/netcmd/user/readpasswords/getpassword.py | 210 +
 python/samba/netcmd/user/readpasswords/show.py | 144 +
 .../netcmd/user/readpasswords/syncpasswords.py | 878 ++
 python/samba/netcmd/user/rename.py | 249 +
 python/samba/netcmd/user/sensitive.py | 83 +
 python/samba/netcmd/user/setexpiry.py | 101 +
 python/samba/netcmd/user/setpassword.py | 161 +
 python/samba/netcmd/user/setprimarygroup.py | 138 +
 python/samba/netcmd/user/unlock.py | 99 +
 python/samba/netcmd/validators.py | 66 +
 python/samba/netcmd/visualize.py | 705 ++
 python/samba/nt_time.py | 60 +
 python/samba/ntacls.py | 662 ++
 python/samba/policies.py | 388 +
 python/samba/provision/__init__.py | 2524 ++++++
 python/samba/provision/backend.py | 87 +
 python/samba/provision/common.py | 91 +
 python/samba/provision/kerberos.py | 104 +
 python/samba/provision/sambadns.py | 1329 +++
 python/samba/remove_dc.py | 466 +
 python/samba/safe_tarfile.py | 94 +
 python/samba/samba3/__init__.py | 409 +
 python/samba/samba3/libsmb_samba_internal.py | 130 +
 python/samba/samdb.py | 1623 ++++
 python/samba/schema.py | 264 +
 python/samba/sd_utils.py | 231 +
 python/samba/sites.py | 126 +
 python/samba/subnets.py | 247 +
 python/samba/subunit/__init__.py | 85 +
 python/samba/subunit/run.py | 682 ++
 python/samba/tdb_util.py | 46 +
 python/samba/tests/__init__.py | 824 ++
 python/samba/tests/audit_log_base.py | 206 +
 python/samba/tests/audit_log_dsdb.py | 634 ++
 python/samba/tests/audit_log_pass_change.py | 331 +
 python/samba/tests/auth.py | 102 +
 python/samba/tests/auth_log.py | 1489 ++++
 python/samba/tests/auth_log_base.py | 221 +
 python/samba/tests/auth_log_ncalrpc.py | 102 +
 python/samba/tests/auth_log_netlogon.py | 134 +
 python/samba/tests/auth_log_netlogon_bad_creds.py | 190 +
 python/samba/tests/auth_log_pass_change.py | 282 +
 python/samba/tests/auth_log_samlogon.py | 181 +
 python/samba/tests/auth_log_winbind.py | 460 +
 python/samba/tests/bin/cepces-submit | 18 +
 python/samba/tests/bin/crontab | 29 +
 python/samba/tests/bin/firewall-cmd | 114 +
 python/samba/tests/bin/getcert | 84 +
 python/samba/tests/bin/gio | 11 +
 python/samba/tests/blackbox/__init__.py | 17 +
 python/samba/tests/blackbox/bug13653.py | 216 +
 python/samba/tests/blackbox/check_output.py | 108 +
 python/samba/tests/blackbox/claims.py | 526 ++
 python/samba/tests/blackbox/downgradedatabase.py | 167 +
 python/samba/tests/blackbox/mdsearch.py | 126 +
 python/samba/tests/blackbox/ndrdump.py | 563 ++
 python/samba/tests/blackbox/netads_dns.py | 83 +
 python/samba/tests/blackbox/netads_json.py | 81 +
 .../tests/blackbox/rpcd_witness_samba_only.py | 1338 +++
 python/samba/tests/blackbox/samba_dnsupdate.py | 125 +
 python/samba/tests/blackbox/smbcacls.py | 148 +
 python/samba/tests/blackbox/smbcacls_basic.py | 129 +
 .../blackbox/smbcacls_dfs_propagate_inherit.py | 84 +
 .../blackbox/smbcacls_propagate_inhertance.py | 1290 +++
 .../samba/tests/blackbox/smbcacls_save_restore.py | 205 +
 python/samba/tests/blackbox/smbcontrol.py | 82 +
 python/samba/tests/blackbox/smbcontrol_process.py | 131 +
 .../testdata/traffic-sample-very-short.model | 61 +
 .../testdata/traffic-sample-very-short.txt | 50 +
 .../blackbox/testdata/traffic_learner.expected | 61 +
 .../blackbox/testdata/traffic_replay-0.expected | 18 +
 .../blackbox/testdata/traffic_replay-1.expected | 19 +
 .../blackbox/testdata/traffic_replay-2.expected | 17 +
 .../blackbox/testdata/traffic_replay-3.expected | 11 +
 .../blackbox/testdata/traffic_replay.expected | 18 +
 .../blackbox/testdata/traffic_summary.expected | 29 +
 .../tests/blackbox/testdata/traffic_summary.pdml | 4989 +++++++++++
 python/samba/tests/blackbox/traffic_learner.py | 71 +
 python/samba/tests/blackbox/traffic_replay.py | 100 +
 python/samba/tests/blackbox/traffic_summary.py | 53 +
 python/samba/tests/common.py | 66 +
 python/samba/tests/complex_expressions.py | 487 ++
 python/samba/tests/compression.py | 210 +
 python/samba/tests/conditional_ace_assembler.py | 227 +
 python/samba/tests/conditional_ace_bytes.py | 95 +
 python/samba/tests/conditional_ace_claims.py | 901 ++
 python/samba/tests/core.py | 83 +
 python/samba/tests/cred_opt.py | 155 +
 python/samba/tests/credentials.py | 501 ++
 python/samba/tests/dcerpc/__init__.py | 19 +
 python/samba/tests/dcerpc/array.py | 206 +
 python/samba/tests/dcerpc/bare.py | 61 +
 python/samba/tests/dcerpc/binding.py | 101 +
 python/samba/tests/dcerpc/createtrustrelax.py | 129 +
 python/samba/tests/dcerpc/dnsserver.py | 1314 +++
 python/samba/tests/dcerpc/integer.py | 250 +
 python/samba/tests/dcerpc/lsa.py | 333 +
 python/samba/tests/dcerpc/mdssvc.py | 194 +
 python/samba/tests/dcerpc/misc.py | 101 +
 python/samba/tests/dcerpc/raw_protocol.py | 7514 +++++++++++++++++
 python/samba/tests/dcerpc/raw_testcase.py | 1177 +++
 python/samba/tests/dcerpc/registry.py | 51 +
 python/samba/tests/dcerpc/rpc_talloc.py | 86 +
 python/samba/tests/dcerpc/rpcecho.py | 71 +
 python/samba/tests/dcerpc/sam.py | 783 ++
 python/samba/tests/dcerpc/samr_change_password.py | 187 +
 python/samba/tests/dcerpc/srvsvc.py | 68 +
 python/samba/tests/dcerpc/string_tests.py | 132 +
 python/samba/tests/dcerpc/testrpc.py | 143 +
 python/samba/tests/dcerpc/unix.py | 43 +
 python/samba/tests/dckeytab.py | 64 +
 python/samba/tests/dns.py | 2247 +++++
 python/samba/tests/dns_aging.py | 2777 ++++++
 python/samba/tests/dns_base.py | 437 +
 python/samba/tests/dns_forwarder.py | 600 ++
 python/samba/tests/dns_forwarder_helpers/server.py | 104 +
 python/samba/tests/dns_invalid.py | 80 +
 python/samba/tests/dns_packet.py | 230 +
 python/samba/tests/dns_tkey.py | 208 +
 python/samba/tests/dns_wildcard.py | 336 +
 python/samba/tests/docs.py | 511 ++
 python/samba/tests/domain_backup.py | 624 ++
 python/samba/tests/domain_backup_offline.py | 252 +
 python/samba/tests/dsdb.py | 1223 +++
 python/samba/tests/dsdb_api.py | 57 +
 python/samba/tests/dsdb_dns.py | 85 +
 python/samba/tests/dsdb_lock.py | 374 +
 python/samba/tests/dsdb_schema_attributes.py | 249 +
 python/samba/tests/emulate/__init__.py | 17 +
 python/samba/tests/emulate/traffic.py | 164 +
 python/samba/tests/emulate/traffic_packet.py | 736 +
 python/samba/tests/encrypted_secrets.py | 83 +
 python/samba/tests/gensec.py | 259 +
 python/samba/tests/get_opt.py | 69 +
 python/samba/tests/getdcname.py | 700 ++
 python/samba/tests/gkdi.py | 647 +
 python/samba/tests/glue.py | 90 +
 python/samba/tests/gpo.py | 8192 ++++++++++++++++++
 python/samba/tests/gpo_member.py | 39 +
 python/samba/tests/graph.py | 532 +
 python/samba/tests/group_audit.py | 395 +
 python/samba/tests/hostconfig.py | 74 +
 python/samba/tests/imports.py | 31 +
 python/samba/tests/join.py | 175 +
 python/samba/tests/kcc/__init__.py | 90 +
 python/samba/tests/kcc/graph.py | 67 +
 python/samba/tests/kcc/graph_utils.py | 165 +
 python/samba/tests/kcc/kcc_utils.py | 393 +
 python/samba/tests/kcc/ldif_import_export.py | 240 +
 python/samba/tests/krb5/alias_tests.py | 202 +
 .../samba/tests/krb5/as_canonicalization_tests.py | 474 ++
 python/samba/tests/krb5/as_req_tests.py | 606 ++
 python/samba/tests/krb5/authn_policy_tests.py | 8903 ++++++++++++++++++++
 python/samba/tests/krb5/claims_in_pac.py | 490 ++
 python/samba/tests/krb5/claims_tests.py | 2032 +++++
 python/samba/tests/krb5/compatability_tests.py | 227 +
 python/samba/tests/krb5/conditional_ace_tests.py | 5588 ++++++++++++
 python/samba/tests/krb5/device_tests.py | 2211 +++++
 python/samba/tests/krb5/etype_tests.py | 597 +
 python/samba/tests/krb5/fast_tests.py | 2108 +++++
 python/samba/tests/krb5/gkdi_tests.py | 745 ++
 python/samba/tests/krb5/group_tests.py | 1967 +++++
 python/samba/tests/krb5/kcrypto.py | 969 +++
 python/samba/tests/krb5/kdc_base_test.py | 3755 +++++++++
 python/samba/tests/krb5/kdc_tests.py | 228 +
 python/samba/tests/krb5/kdc_tgs_tests.py | 3506 ++++++++
 python/samba/tests/krb5/kdc_tgt_tests.py | 86 +
 python/samba/tests/krb5/kpasswd_tests.py | 983 +++
 python/samba/tests/krb5/lockout_tests.py | 1137 +++
 .../krb5/ms_kile_client_principal_lookup_tests.py | 818 ++
 python/samba/tests/krb5/nt_hash_tests.py | 142 +
 python/samba/tests/krb5/pac_align_tests.py | 93 +
 python/samba/tests/krb5/pkinit_tests.py | 1211 +++
 python/samba/tests/krb5/protected_users_tests.py | 1053 +++
 python/samba/tests/krb5/pyasn1_regen.sh | 42 +
 python/samba/tests/krb5/raw_testcase.py | 6221 ++++++++++++++
 python/samba/tests/krb5/rfc4120.asn1 | 1908 +++++
 python/samba/tests/krb5/rfc4120_constants.py | 247 +
 python/samba/tests/krb5/rfc4120_pyasn1.py | 92 +
 .../samba/tests/krb5/rfc4120_pyasn1_generated.py | 2690 ++++++
 python/samba/tests/krb5/rodc_tests.py | 77 +
 python/samba/tests/krb5/s4u_tests.py | 1838 ++++
 python/samba/tests/krb5/salt_tests.py | 469 ++
 python/samba/tests/krb5/simple_tests.py | 185 +
 python/samba/tests/krb5/spn_tests.py | 212 +
 python/samba/tests/krb5/test_ccache.py | 173 +
 python/samba/tests/krb5/test_idmap_nss.py | 232 +
 python/samba/tests/krb5/test_ldap.py | 168 +
 python/samba/tests/krb5/test_min_domain_uid.py | 122 +
 python/samba/tests/krb5/test_rpc.py | 138 +
 python/samba/tests/krb5/test_smb.py | 153 +
 python/samba/tests/krb5/xrealm_tests.py | 187 +
 python/samba/tests/krb5_credentials.py | 111 +
 python/samba/tests/ldap_raw.py | 939 +++
 python/samba/tests/ldap_referrals.py | 87 +
 python/samba/tests/ldap_spn.py | 924 ++
 python/samba/tests/ldap_upn_sam_account.py | 510 ++
 python/samba/tests/ldap_whoami.py | 38 +
 python/samba/tests/libsmb-basic.py | 268 +
 python/samba/tests/libsmb.py | 55 +
 python/samba/tests/loadparm.py | 84 +
 python/samba/tests/logfiles.py | 381 +
 python/samba/tests/lsa_string.py | 68 +
 python/samba/tests/messaging.py | 174 +
 python/samba/tests/ndr/gkdi.py | 397 +
 python/samba/tests/ndr/gmsa.py | 99 +
 python/samba/tests/ndr/wbint.py | 139 +
 python/samba/tests/net_join.py | 63 +
 python/samba/tests/net_join_no_spnego.py | 90 +
 python/samba/tests/netbios.py | 65 +
 python/samba/tests/netcmd.py | 165 +
 python/samba/tests/netlogonsvc.py | 66 +
 python/samba/tests/ntacls.py | 87 +
 python/samba/tests/ntacls_backup.py | 198 +
 python/samba/tests/ntlm_auth.py | 342 +
 python/samba/tests/ntlm_auth_base.py | 210 +
 python/samba/tests/ntlm_auth_krb5.py | 83 +
 python/samba/tests/ntlmdisabled.py | 84 +
 python/samba/tests/pam_winbind.py | 72 +
 python/samba/tests/pam_winbind_chauthtok.py | 42 +
 python/samba/tests/pam_winbind_setcred.py | 56 +
 python/samba/tests/pam_winbind_warn_pwd_expire.py | 52 +
 python/samba/tests/param.py | 107 +
 python/samba/tests/password_hash.py | 335 +
 python/samba/tests/password_hash_fl2003.py | 196 +
 python/samba/tests/password_hash_fl2008.py | 207 +
 python/samba/tests/password_hash_gpgme.py | 293 +
 python/samba/tests/password_hash_ldap.py | 129 +
 python/samba/tests/password_quality.py | 52 +
 python/samba/tests/password_test.py | 59 +
 python/samba/tests/policy.py | 34 +
 python/samba/tests/posixacl.py | 878 ++
 python/samba/tests/prefork_restart.py | 462 +
 python/samba/tests/process_limits.py | 70 +
 python/samba/tests/provision.py | 201 +
 python/samba/tests/pso.py | 272 +
 python/samba/tests/py_credentials.py | 677 ++
 python/samba/tests/registry.py | 79 +
 python/samba/tests/reparsepoints.py | 241 +
 python/samba/tests/s3_net_join.py | 77 +
 python/samba/tests/s3idmapdb.py | 57 +
 python/samba/tests/s3param.py | 50 +
 python/samba/tests/s3passdb.py | 138 +
 python/samba/tests/s3registry.py | 53 +
 python/samba/tests/s3windb.py | 45 +
 python/samba/tests/safe_tarfile.py | 81 +
 python/samba/tests/samba3sam.py | 1125 +++
 python/samba/tests/samba_startup_fl_change.py | 180 +
 python/samba/tests/samba_tool/__init__.py | 15 +
 python/samba/tests/samba_tool/base.py | 137 +
 python/samba/tests/samba_tool/computer.py | 378 +
 python/samba/tests/samba_tool/computer_edit.sh | 197 +
 python/samba/tests/samba_tool/contact.py | 468 +
 python/samba/tests/samba_tool/contact_edit.sh | 183 +
 python/samba/tests/samba_tool/demote.py | 106 +
 python/samba/tests/samba_tool/dnscmd.py | 1506 ++++
 .../samba/tests/samba_tool/domain_auth_policy.py | 1517 ++++
 python/samba/tests/samba_tool/domain_auth_silo.py | 618 ++
 python/samba/tests/samba_tool/domain_claim.py | 608 ++
 python/samba/tests/samba_tool/domain_models.py | 416 +
 .../samba_tool/drs_clone_dc_data_lmdb_size.py | 119 +
 python/samba/tests/samba_tool/dsacl.py | 211 +
 python/samba/tests/samba_tool/forest.py | 70 +
 python/samba/tests/samba_tool/fsmo.py | 52 +
 python/samba/tests/samba_tool/gpo.py | 1847 ++++
 python/samba/tests/samba_tool/gpo_exts.py | 202 +
 python/samba/tests/samba_tool/group.py | 613 ++
 python/samba/tests/samba_tool/group_edit.sh | 228 +
 python/samba/tests/samba_tool/help.py | 81 +
 python/samba/tests/samba_tool/join.py | 31 +
 python/samba/tests/samba_tool/join_lmdb_size.py | 152 +
 python/samba/tests/samba_tool/join_member.py | 71 +
 python/samba/tests/samba_tool/ntacl.py | 247 +
 python/samba/tests/samba_tool/ou.py | 291 +
 python/samba/tests/samba_tool/passwordsettings.py | 484 ++
 python/samba/tests/samba_tool/processes.py | 42 +
 .../samba/tests/samba_tool/promote_dc_lmdb_size.py | 174 +
 .../samba/tests/samba_tool/provision_lmdb_size.py | 132 +
 .../tests/samba_tool/provision_password_check.py | 57 +
 .../samba_tool/provision_userPassword_crypt.py | 67 +
 python/samba/tests/samba_tool/rodc.py | 131 +
 python/samba/tests/samba_tool/schema.py | 109 +
 python/samba/tests/samba_tool/silo_base.py | 229 +
 python/samba/tests/samba_tool/sites.py | 205 +
 python/samba/tests/samba_tool/timecmd.py | 44 +
 python/samba/tests/samba_tool/user.py | 1246 +++
 python/samba/tests/samba_tool/user_auth_policy.py | 86 +
 python/samba/tests/samba_tool/user_auth_silo.py | 84 +
 .../tests/samba_tool/user_check_password_script.py | 106 +
 python/samba/tests/samba_tool/user_edit.sh | 198 +
 .../tests/samba_tool/user_get_kerberos_ticket.py | 195 +
 .../tests/samba_tool/user_getpassword_gmsa.py | 171 +
 .../samba/tests/samba_tool/user_virtualCryptSHA.py | 516 ++
 .../tests/samba_tool/user_virtualCryptSHA_base.py | 99 +
 .../tests/samba_tool/user_virtualCryptSHA_gpg.py | 262 +
 .../user_virtualCryptSHA_userPassword.py | 188 +
 python/samba/tests/samba_tool/user_wdigest.py | 450 +
 python/samba/tests/samba_tool/visualize.py | 618 ++
 python/samba/tests/samba_tool/visualize_drs.py | 636 ++
 python/samba/tests/samba_upgradedns_lmdb.py | 75 +
 python/samba/tests/samdb.py | 66 +
 python/samba/tests/samdb_api.py | 148 +
 python/samba/tests/sddl.py | 894 ++
 python/samba/tests/sddl_conditional_ace.py | 52 +
 python/samba/tests/security.py | 209 +
 python/samba/tests/security_descriptors.py | 216 +
 python/samba/tests/segfault.py | 243 +
 python/samba/tests/sid_strings.py | 608 ++
 python/samba/tests/smb-notify.py | 429 +
 python/samba/tests/smb.py | 236 +
 python/samba/tests/smb1posix.py | 71 +
 python/samba/tests/smb2symlink.py | 216 +
 python/samba/tests/smb3unix.py | 418 +
 python/samba/tests/smbconf.py | 352 +
 python/samba/tests/smbd_base.py | 48 +
 python/samba/tests/smbd_fuzztest.py | 76 +
 python/samba/tests/source.py | 242 +
 python/samba/tests/source_chars.py | 326 +
 python/samba/tests/strings.py | 99 +
 python/samba/tests/subunitrun.py | 63 +
 python/samba/tests/tdb_util.py | 50 +
 python/samba/tests/test_pam_winbind.sh | 46 +
 python/samba/tests/test_pam_winbind_chauthtok.sh | 77 +
 python/samba/tests/test_pam_winbind_setcred.sh | 46 +
 .../tests/test_pam_winbind_warn_pwd_expire.sh | 75 +
 python/samba/tests/token_factory.py | 256 +
 python/samba/tests/upgrade.py | 40 +
 python/samba/tests/upgradeprovision.py | 155 +
 python/samba/tests/upgradeprovisionneeddc.py | 181 +
 python/samba/tests/usage.py | 380 +
 python/samba/tests/xattr.py | 159 +
 python/samba/trust_utils.py | 62 +
 python/samba/upgrade.py | 849 ++
 python/samba/upgradehelpers.py | 834 ++
 python/samba/uptodateness.py | 201 +
 python/samba/xattr.py | 60 +
 485 files changed, 218254 insertions(+)
 create mode 100644 python/samba/__init__.py
 create mode 100644 python/samba/auth_util.py
 create mode 100644 python/samba/colour.py
 create mode 100644 python/samba/common.py
 create mode 100644 python/samba/dbchecker.py
 create mode 100644 python/samba/descriptor.py
 create mode 100644 python/samba/dnsresolver.py
 create mode 100644 python/samba/dnsserver.py
 create mode 100644 python/samba/domain_update.py
 create mode 100644 python/samba/drs_utils.py
 create mode 100644 python/samba/emulate/__init__.py
 create mode 100644 python/samba/emulate/traffic.py
 create mode 100644 python/samba/emulate/traffic_packets.py
 create mode 100644 python/samba/forest_update.py
 create mode 100644 python/samba/functional_level.py
 create mode 100644 python/samba/getopt.py
 create mode 100644 python/samba/gkdi.py
 create mode 100644 python/samba/gp/__init__.py
 create mode 100644 python/samba/gp/gp_centrify_crontab_ext.py
 create mode 100644 python/samba/gp/gp_centrify_sudoers_ext.py
 create mode 100644 python/samba/gp/gp_cert_auto_enroll_ext.py
 create mode 100644 python/samba/gp/gp_chromium_ext.py
 create mode 100644 python/samba/gp/gp_drive_maps_ext.py
 create mode 100644 python/samba/gp/gp_ext_loader.py
 create mode 100644 python/samba/gp/gp_firefox_ext.py
 create mode 100644 python/samba/gp/gp_firewalld_ext.py
 create mode 100644 python/samba/gp/gp_gnome_settings_ext.py
 create mode 100644 python/samba/gp/gp_msgs_ext.py
 create mode 100644 python/samba/gp/gp_scripts_ext.py
 create mode 100644 python/samba/gp/gp_sec_ext.py
 create mode 100644 python/samba/gp/gp_smb_conf_ext.py
 create mode 100644 python/samba/gp/gp_sudoers_ext.py
 create mode 100644 python/samba/gp/gpclass.py
 create mode 100644 python/samba/gp/util/logging.py
 create mode 100644 python/samba/gp/vgp_access_ext.py
 create mode 100644 python/samba/gp/vgp_files_ext.py
 create mode 100644 python/samba/gp/vgp_issue_ext.py
 create mode 100644 python/samba/gp/vgp_motd_ext.py
 create mode 100644 python/samba/gp/vgp_openssh_ext.py
 create mode 100644 python/samba/gp/vgp_startup_scripts_ext.py
 create mode 100644 python/samba/gp/vgp_sudoers_ext.py
 create mode 100644 python/samba/gp/vgp_symlink_ext.py
 create mode 100644 python/samba/gp_parse/__init__.py
 create mode 100644 python/samba/gp_parse/gp_aas.py
 create mode 100644 python/samba/gp_parse/gp_csv.py
 create mode 100644 python/samba/gp_parse/gp_inf.py
 create mode 100644 python/samba/gp_parse/gp_ini.py
 create mode 100644 python/samba/gp_parse/gp_pol.py
 create mode 100644 python/samba/graph.py
 create mode 100644 python/samba/hostconfig.py
 create mode 100644 python/samba/idmap.py
 create mode 100644 python/samba/join.py
 create mode 100644 python/samba/kcc/__init__.py
 create mode 100644 python/samba/kcc/debug.py
 create mode 100644 python/samba/kcc/graph.py
 create mode 100644 python/samba/kcc/graph_utils.py
 create mode 100644 python/samba/kcc/kcc_utils.py
 create mode 100644 python/samba/kcc/ldif_import_export.py
 create mode 100644 python/samba/logger.py
 create mode 100644 python/samba/mdb_util.py
 create mode 100644 python/samba/ms_display_specifiers.py
 create mode 100644 python/samba/ms_forest_updates_markdown.py
 create mode 100644 python/samba/ms_schema.py
 create mode 100644 python/samba/ms_schema_markdown.py
 create mode 100644 python/samba/ndr.py
 create mode 100644 python/samba/netcmd/__init__.py
 create mode 100644 python/samba/netcmd/common.py
 create mode 100644 python/samba/netcmd/computer.py
 create mode 100644 python/samba/netcmd/contact.py
 create mode 100644 python/samba/netcmd/dbcheck.py
 create mode 100644 python/samba/netcmd/delegation.py
 create mode 100644 python/samba/netcmd/dns.py
 create mode 100644 python/samba/netcmd/domain/__init__.py
 create mode 100644 python/samba/netcmd/domain/auth/__init__.py
 create mode 100644 python/samba/netcmd/domain/auth/policy.py
 create mode 100644 python/samba/netcmd/domain/auth/silo.py
 create mode 100644 python/samba/netcmd/domain/auth/silo_member.py
 create mode 100644 python/samba/netcmd/domain/backup.py
 create mode 100644 python/samba/netcmd/domain/claim/__init__.py
 create mode 100644 python/samba/netcmd/domain/claim/claim_type.py
 create mode 100644 python/samba/netcmd/domain/claim/value_type.py
 create mode 100644 python/samba/netcmd/domain/classicupgrade.py
 create mode 100644 python/samba/netcmd/domain/common.py
 create mode 100644 python/samba/netcmd/domain/dcpromo.py
 create mode 100644 python/samba/netcmd/domain/demote.py
 create mode 100644 python/samba/netcmd/domain/functional_prep.py
 create mode 100644 python/samba/netcmd/domain/info.py
 create mode 100644 python/samba/netcmd/domain/join.py
 create mode 100644 python/samba/netcmd/domain/keytab.py
 create mode 100644 python/samba/netcmd/domain/leave.py
 create mode 100644 python/samba/netcmd/domain/level.py
 create mode 100644 python/samba/netcmd/domain/models/__init__.py
 create mode 100644 python/samba/netcmd/domain/models/auth_policy.py
 create mode 100644 python/samba/netcmd/domain/models/auth_silo.py
 create mode 100644 python/samba/netcmd/domain/models/claim_type.py
 create mode 100644 python/samba/netcmd/domain/models/exceptions.py
 create mode 100644 python/samba/netcmd/domain/models/fields.py
 create mode 100644 python/samba/netcmd/domain/models/group.py
 create mode 100644 python/samba/netcmd/domain/models/model.py
 create mode 100644 python/samba/netcmd/domain/models/query.py
 create mode 100644 python/samba/netcmd/domain/models/schema.py
 create mode 100644 python/samba/netcmd/domain/models/site.py
 create mode 100644 python/samba/netcmd/domain/models/subnet.py
 create mode 100644 python/samba/netcmd/domain/models/user.py
 create mode 100644 python/samba/netcmd/domain/models/value_type.py
 create mode 100644 python/samba/netcmd/domain/passwordsettings.py
 create mode 100644 python/samba/netcmd/domain/provision.py
 create mode 100644 python/samba/netcmd/domain/samba3upgrade.py
 create mode 100644 python/samba/netcmd/domain/schemaupgrade.py
 create mode 100644 python/samba/netcmd/domain/tombstones.py
 create mode 100644 python/samba/netcmd/domain/trust.py
 create mode 100644 python/samba/netcmd/drs.py
 create mode 100644 python/samba/netcmd/dsacl.py
 create mode 100644 python/samba/netcmd/encoders.py
 create mode 100644 python/samba/netcmd/forest.py
 create mode 100644 python/samba/netcmd/fsmo.py
 create mode 100644 python/samba/netcmd/gpcommon.py
 create mode 100644 python/samba/netcmd/gpo.py
 create mode 100644 python/samba/netcmd/group.py
 create mode 100644 python/samba/netcmd/ldapcmp.py
 create mode 100644 python/samba/netcmd/main.py
 create mode 100644 python/samba/netcmd/nettime.py
 create mode 100644 python/samba/netcmd/ntacl.py
 create mode 100644 python/samba/netcmd/ou.py
 create mode 100644 python/samba/netcmd/processes.py
 create mode 100644 python/samba/netcmd/pso.py
 create mode 100644 python/samba/netcmd/rodc.py
 create mode 100644 python/samba/netcmd/schema.py
 create mode 100644 python/samba/netcmd/shell.py
 create mode 100644 python/samba/netcmd/sites.py
 create mode 100644 python/samba/netcmd/spn.py
 create mode 100644 python/samba/netcmd/testparm.py
 create mode 100644 python/samba/netcmd/user/__init__.py
 create mode 100644 python/samba/netcmd/user/add.py
 create mode 100644 python/samba/netcmd/user/add_unix_attrs.py
 create mode 100644 python/samba/netcmd/user/auth/__init__.py
 create mode 100644 python/samba/netcmd/user/auth/policy.py
 create mode 100644 python/samba/netcmd/user/auth/silo.py
 create mode 100644 python/samba/netcmd/user/delete.py
 create mode 100644 python/samba/netcmd/user/disable.py
 create mode 100644 python/samba/netcmd/user/edit.py
 create mode 100644 python/samba/netcmd/user/enable.py
 create mode 100644 python/samba/netcmd/user/getgroups.py
 create mode 100644 python/samba/netcmd/user/list.py
 create mode 100644 python/samba/netcmd/user/move.py
 create mode 100644 python/samba/netcmd/user/password.py
 create mode 100644 python/samba/netcmd/user/readpasswords/__init__.py
 create mode 100644 python/samba/netcmd/user/readpasswords/common.py
 create mode 100644 python/samba/netcmd/user/readpasswords/get_kerberos_ticket.py
 create mode 100644 python/samba/netcmd/user/readpasswords/getpassword.py
 create mode 100644 python/samba/netcmd/user/readpasswords/show.py
 create mode 100644 python/samba/netcmd/user/readpasswords/syncpasswords.py
 create mode 100644 python/samba/netcmd/user/rename.py
 create mode 100644 python/samba/netcmd/user/sensitive.py
 create mode 100644 python/samba/netcmd/user/setexpiry.py
 create mode 100644 python/samba/netcmd/user/setpassword.py
 create mode 100644 python/samba/netcmd/user/setprimarygroup.py
 create mode 100644 python/samba/netcmd/user/unlock.py
 create mode 100644 python/samba/netcmd/validators.py
 create mode 100644 python/samba/netcmd/visualize.py
 create mode 100644 python/samba/nt_time.py
 create mode 100644 python/samba/ntacls.py
 create mode 100644 python/samba/policies.py
 create mode 100644 python/samba/provision/__init__.py
 create mode 100644 python/samba/provision/backend.py
 create mode 100644 python/samba/provision/common.py
 create mode 100644 python/samba/provision/kerberos.py
 create mode 100644 python/samba/provision/sambadns.py
 create mode 100644 python/samba/remove_dc.py
 create mode 100644 python/samba/safe_tarfile.py
 create mode 100644 python/samba/samba3/__init__.py
 create mode 100644 python/samba/samba3/libsmb_samba_internal.py
 create mode 100644 python/samba/samdb.py
 create mode 100644 python/samba/schema.py
 create mode 100644 python/samba/sd_utils.py
 create mode 100644 python/samba/sites.py
 create mode 100644 python/samba/subnets.py
 create mode 100644 python/samba/subunit/__init__.py
 create mode 100755 python/samba/subunit/run.py
 create mode 100644 python/samba/tdb_util.py
 create mode 100644 python/samba/tests/__init__.py
 create mode 100644 python/samba/tests/audit_log_base.py
 create mode 100644 python/samba/tests/audit_log_dsdb.py
 create mode 100644 python/samba/tests/audit_log_pass_change.py
 create mode 100644 python/samba/tests/auth.py
 create mode 100755 python/samba/tests/auth_log.py
 create mode 100644 python/samba/tests/auth_log_base.py
 create mode 100644 python/samba/tests/auth_log_ncalrpc.py
 create mode 100644 python/samba/tests/auth_log_netlogon.py
 create mode 100644 python/samba/tests/auth_log_netlogon_bad_creds.py
 create mode 100644 python/samba/tests/auth_log_pass_change.py
 create mode 100644 python/samba/tests/auth_log_samlogon.py
 create mode 100644 python/samba/tests/auth_log_winbind.py
 create mode 100755 python/samba/tests/bin/cepces-submit
 create mode 100755 python/samba/tests/bin/crontab
 create mode 100755 python/samba/tests/bin/firewall-cmd
 create mode 100755 python/samba/tests/bin/getcert
 create mode 100755 python/samba/tests/bin/gio
 create mode 100644 python/samba/tests/blackbox/__init__.py
 create mode 100644 python/samba/tests/blackbox/bug13653.py
 create mode 100644 python/samba/tests/blackbox/check_output.py
 create mode 100755 python/samba/tests/blackbox/claims.py
 create mode 100644 python/samba/tests/blackbox/downgradedatabase.py
 create mode 100644 python/samba/tests/blackbox/mdsearch.py
 create mode 100644 python/samba/tests/blackbox/ndrdump.py
 create mode 100644 python/samba/tests/blackbox/netads_dns.py
 create mode 100644 python/samba/tests/blackbox/netads_json.py
 create mode 100755 python/samba/tests/blackbox/rpcd_witness_samba_only.py
 create mode 100644 python/samba/tests/blackbox/samba_dnsupdate.py
 create mode 100644 python/samba/tests/blackbox/smbcacls.py
 create mode 100644 python/samba/tests/blackbox/smbcacls_basic.py
 create mode 100644 python/samba/tests/blackbox/smbcacls_dfs_propagate_inherit.py
 create mode 100644 python/samba/tests/blackbox/smbcacls_propagate_inhertance.py
 create mode 100644 python/samba/tests/blackbox/smbcacls_save_restore.py
 create mode 100644 python/samba/tests/blackbox/smbcontrol.py
 create mode 100644 python/samba/tests/blackbox/smbcontrol_process.py
 create mode 100644 python/samba/tests/blackbox/testdata/traffic-sample-very-short.model
 create mode 100644 python/samba/tests/blackbox/testdata/traffic-sample-very-short.txt
 create mode 100644 python/samba/tests/blackbox/testdata/traffic_learner.expected
 create mode 100644 python/samba/tests/blackbox/testdata/traffic_replay-0.expected
 create mode 100644 python/samba/tests/blackbox/testdata/traffic_replay-1.expected
 create mode 100644 python/samba/tests/blackbox/testdata/traffic_replay-2.expected
 create mode 100644 python/samba/tests/blackbox/testdata/traffic_replay-3.expected
 create mode 100644 python/samba/tests/blackbox/testdata/traffic_replay.expected
 create mode 100644 python/samba/tests/blackbox/testdata/traffic_summary.expected
 create mode 100644 python/samba/tests/blackbox/testdata/traffic_summary.pdml
 create mode 100644 python/samba/tests/blackbox/traffic_learner.py
 create mode 100644 python/samba/tests/blackbox/traffic_replay.py
 create mode 100644 python/samba/tests/blackbox/traffic_summary.py
 create mode 100644 python/samba/tests/common.py
 create mode 100644 python/samba/tests/complex_expressions.py
 create mode 100644 python/samba/tests/compression.py
 create mode 100644 python/samba/tests/conditional_ace_assembler.py
 create mode 100644 python/samba/tests/conditional_ace_bytes.py
 create mode 100644 python/samba/tests/conditional_ace_claims.py
 create mode 100644 python/samba/tests/core.py
 create mode 100644 python/samba/tests/cred_opt.py
 create mode 100644 python/samba/tests/credentials.py
 create mode 100644 python/samba/tests/dcerpc/__init__.py
 create mode 100644 python/samba/tests/dcerpc/array.py
 create mode 100644 python/samba/tests/dcerpc/bare.py
 create mode 100644 python/samba/tests/dcerpc/binding.py
 create mode 100644 python/samba/tests/dcerpc/createtrustrelax.py
 create mode 100644 python/samba/tests/dcerpc/dnsserver.py
 create mode 100644 python/samba/tests/dcerpc/integer.py
 create mode 100644 python/samba/tests/dcerpc/lsa.py
 create mode 100644 python/samba/tests/dcerpc/mdssvc.py
 create mode 100644 python/samba/tests/dcerpc/misc.py
 create mode 100755 python/samba/tests/dcerpc/raw_protocol.py
 create mode 100644 python/samba/tests/dcerpc/raw_testcase.py
 create mode 100644 python/samba/tests/dcerpc/registry.py
 create mode 100644 python/samba/tests/dcerpc/rpc_talloc.py
 create mode 100644 python/samba/tests/dcerpc/rpcecho.py
 create mode 100644 python/samba/tests/dcerpc/sam.py
 create mode 100644 python/samba/tests/dcerpc/samr_change_password.py
 create mode 100644 python/samba/tests/dcerpc/srvsvc.py
 create mode 100644 python/samba/tests/dcerpc/string_tests.py
 create mode 100644 python/samba/tests/dcerpc/testrpc.py
 create mode 100644 python/samba/tests/dcerpc/unix.py
 create mode 100644 python/samba/tests/dckeytab.py
 create mode 100644 python/samba/tests/dns.py
 create mode 100644 python/samba/tests/dns_aging.py
 create mode 100644 python/samba/tests/dns_base.py
 create mode 100644 python/samba/tests/dns_forwarder.py
 create mode 100644 python/samba/tests/dns_forwarder_helpers/server.py
 create mode 100644 python/samba/tests/dns_invalid.py
 create mode 100644 python/samba/tests/dns_packet.py
 create mode 100644 python/samba/tests/dns_tkey.py
 create mode 100644 python/samba/tests/dns_wildcard.py
 create mode 100644 python/samba/tests/docs.py
 create mode 100644 python/samba/tests/domain_backup.py
 create mode 100644 python/samba/tests/domain_backup_offline.py
 create mode 100644 python/samba/tests/dsdb.py
 create mode 100644 python/samba/tests/dsdb_api.py
 create mode 100644 python/samba/tests/dsdb_dns.py
 create mode 100644 python/samba/tests/dsdb_lock.py
 create mode 100644 python/samba/tests/dsdb_schema_attributes.py
 create mode 100644 python/samba/tests/emulate/__init__.py
 create mode 100644 python/samba/tests/emulate/traffic.py
 create mode 100644 python/samba/tests/emulate/traffic_packet.py
 create mode 100644 python/samba/tests/encrypted_secrets.py
 create mode 100644 python/samba/tests/gensec.py
 create mode 100644 python/samba/tests/get_opt.py
 create mode 100644 python/samba/tests/getdcname.py
 create mode 100644 python/samba/tests/gkdi.py
 create mode 100644 python/samba/tests/glue.py
 create mode 100644 python/samba/tests/gpo.py
 create mode 100644 python/samba/tests/gpo_member.py
 create mode 100644 python/samba/tests/graph.py
 create mode 100644 python/samba/tests/group_audit.py
 create mode 100644 python/samba/tests/hostconfig.py
 create mode 100644 python/samba/tests/imports.py
 create mode 100644 python/samba/tests/join.py
 create mode 100644 python/samba/tests/kcc/__init__.py
 create mode 100644 python/samba/tests/kcc/graph.py
 create mode 100644 python/samba/tests/kcc/graph_utils.py
 create mode 100644 python/samba/tests/kcc/kcc_utils.py
 create mode 100644 python/samba/tests/kcc/ldif_import_export.py
 create mode 100755 python/samba/tests/krb5/alias_tests.py
 create mode 100755 python/samba/tests/krb5/as_canonicalization_tests.py
 create mode 100755 python/samba/tests/krb5/as_req_tests.py
 create mode 100755 python/samba/tests/krb5/authn_policy_tests.py
 create mode 100755 python/samba/tests/krb5/claims_in_pac.py
 create mode 100755 python/samba/tests/krb5/claims_tests.py
 create mode 100755 python/samba/tests/krb5/compatability_tests.py
 create mode 100755 python/samba/tests/krb5/conditional_ace_tests.py
 create mode 100755 python/samba/tests/krb5/device_tests.py
 create mode 100755 python/samba/tests/krb5/etype_tests.py
 create mode 100755 python/samba/tests/krb5/fast_tests.py
 create mode 100755 python/samba/tests/krb5/gkdi_tests.py
 create mode 100755 python/samba/tests/krb5/group_tests.py
 create mode 100755 python/samba/tests/krb5/kcrypto.py
 create mode 100644 python/samba/tests/krb5/kdc_base_test.py
 create mode 100755 python/samba/tests/krb5/kdc_tests.py
 create mode 100755 python/samba/tests/krb5/kdc_tgs_tests.py
 create mode 100755 python/samba/tests/krb5/kdc_tgt_tests.py
 create mode 100755 python/samba/tests/krb5/kpasswd_tests.py
 create mode 100755 python/samba/tests/krb5/lockout_tests.py
 create mode 100755 python/samba/tests/krb5/ms_kile_client_principal_lookup_tests.py
 create mode 100755 python/samba/tests/krb5/nt_hash_tests.py
 create mode 100755 python/samba/tests/krb5/pac_align_tests.py
 create mode 100755 python/samba/tests/krb5/pkinit_tests.py
 create mode 100755 python/samba/tests/krb5/protected_users_tests.py
 create mode 100755 python/samba/tests/krb5/pyasn1_regen.sh
 create mode 100644 python/samba/tests/krb5/raw_testcase.py
 create mode 100644 python/samba/tests/krb5/rfc4120.asn1
 create mode 100644 python/samba/tests/krb5/rfc4120_constants.py
 create mode 100644 python/samba/tests/krb5/rfc4120_pyasn1.py
 create mode 100644 python/samba/tests/krb5/rfc4120_pyasn1_generated.py
 create mode 100755 python/samba/tests/krb5/rodc_tests.py
 create mode 100755 python/samba/tests/krb5/s4u_tests.py
 create mode 100755 python/samba/tests/krb5/salt_tests.py
 create mode 100755 python/samba/tests/krb5/simple_tests.py
 create mode 100755 python/samba/tests/krb5/spn_tests.py
 create mode 100755 python/samba/tests/krb5/test_ccache.py
 create mode 100755 python/samba/tests/krb5/test_idmap_nss.py
 create mode 100755 python/samba/tests/krb5/test_ldap.py
 create mode 100755 python/samba/tests/krb5/test_min_domain_uid.py
 create mode 100755 python/samba/tests/krb5/test_rpc.py
 create mode 100755 python/samba/tests/krb5/test_smb.py
 create mode 100755 python/samba/tests/krb5/xrealm_tests.py
 create mode 100644 python/samba/tests/krb5_credentials.py
 create mode 100644 python/samba/tests/ldap_raw.py
 create mode 100644 python/samba/tests/ldap_referrals.py
 create mode 100644 python/samba/tests/ldap_spn.py
 create mode 100644 python/samba/tests/ldap_upn_sam_account.py
 create mode 100644 python/samba/tests/ldap_whoami.py
 create mode 100644 python/samba/tests/libsmb-basic.py
 create mode 100644 python/samba/tests/libsmb.py
 create mode 100644 python/samba/tests/loadparm.py
 create mode 100644 python/samba/tests/logfiles.py
 create mode 100644 python/samba/tests/lsa_string.py
 create mode 100644 python/samba/tests/messaging.py
 create mode 100755 python/samba/tests/ndr/gkdi.py
 create mode 100755 python/samba/tests/ndr/gmsa.py
 create mode 100644 python/samba/tests/ndr/wbint.py
 create mode 100644 python/samba/tests/net_join.py
 create mode 100644 python/samba/tests/net_join_no_spnego.py
 create mode 100644 python/samba/tests/netbios.py
 create mode 100644 python/samba/tests/netcmd.py
 create mode 100644 python/samba/tests/netlogonsvc.py
 create mode 100644 python/samba/tests/ntacls.py
 create mode 100644 python/samba/tests/ntacls_backup.py
 create mode 100644 python/samba/tests/ntlm_auth.py
 create mode 100644 python/samba/tests/ntlm_auth_base.py
 create mode 100644 python/samba/tests/ntlm_auth_krb5.py
 create mode 100644 python/samba/tests/ntlmdisabled.py
 create mode 100644 python/samba/tests/pam_winbind.py
 create mode 100644 python/samba/tests/pam_winbind_chauthtok.py
 create mode 100644 python/samba/tests/pam_winbind_setcred.py
 create mode 100644 python/samba/tests/pam_winbind_warn_pwd_expire.py
 create mode 100644 python/samba/tests/param.py
 create mode 100644 python/samba/tests/password_hash.py
 create mode 100644 python/samba/tests/password_hash_fl2003.py
 create mode 100644 python/samba/tests/password_hash_fl2008.py
 create mode 100644 python/samba/tests/password_hash_gpgme.py
 create mode 100644 python/samba/tests/password_hash_ldap.py
 create mode 100644 python/samba/tests/password_quality.py
 create mode 100644 python/samba/tests/password_test.py
 create mode 100644 python/samba/tests/policy.py
 create mode 100644 python/samba/tests/posixacl.py
 create mode 100644 python/samba/tests/prefork_restart.py
 create mode 100644 python/samba/tests/process_limits.py
 create mode 100644 python/samba/tests/provision.py
 create mode 100644 python/samba/tests/pso.py
 create mode 100644 python/samba/tests/py_credentials.py
 create mode 100644 python/samba/tests/registry.py
 create mode 100644 python/samba/tests/reparsepoints.py
 create mode 100644 python/samba/tests/s3_net_join.py
 create mode 100644 python/samba/tests/s3idmapdb.py
 create mode 100644 python/samba/tests/s3param.py
 create mode 100644 python/samba/tests/s3passdb.py
 create mode 100644 python/samba/tests/s3registry.py
 create mode 100644 python/samba/tests/s3windb.py
 create mode 100644 python/samba/tests/safe_tarfile.py
 create mode 100644 python/samba/tests/samba3sam.py
 create mode 100644 python/samba/tests/samba_startup_fl_change.py
 create mode 100644 python/samba/tests/samba_tool/__init__.py
 create mode 100644 python/samba/tests/samba_tool/base.py
 create mode 100644 python/samba/tests/samba_tool/computer.py
 create mode 100755 python/samba/tests/samba_tool/computer_edit.sh
 create mode 100644 python/samba/tests/samba_tool/contact.py
 create mode 100755 python/samba/tests/samba_tool/contact_edit.sh
 create mode 100644 python/samba/tests/samba_tool/demote.py
 create mode 100644 python/samba/tests/samba_tool/dnscmd.py
 create mode 100644 python/samba/tests/samba_tool/domain_auth_policy.py
 create mode 100644 python/samba/tests/samba_tool/domain_auth_silo.py
 create mode 100644 python/samba/tests/samba_tool/domain_claim.py
 create mode 100644 python/samba/tests/samba_tool/domain_models.py
 create mode 100644 python/samba/tests/samba_tool/drs_clone_dc_data_lmdb_size.py
 create mode 100644 python/samba/tests/samba_tool/dsacl.py
 create mode 100644 python/samba/tests/samba_tool/forest.py
 create mode 100644 python/samba/tests/samba_tool/fsmo.py
 create mode 100644 python/samba/tests/samba_tool/gpo.py
 create mode 100644 python/samba/tests/samba_tool/gpo_exts.py
 create mode 100644 python/samba/tests/samba_tool/group.py
 create mode 100755 python/samba/tests/samba_tool/group_edit.sh
 create mode 100644 python/samba/tests/samba_tool/help.py
 create mode 100644 python/samba/tests/samba_tool/join.py
 create mode 100644 python/samba/tests/samba_tool/join_lmdb_size.py
 create mode 100644 python/samba/tests/samba_tool/join_member.py
 create mode 100644 python/samba/tests/samba_tool/ntacl.py
 create mode 100644 python/samba/tests/samba_tool/ou.py
 create mode 100644 python/samba/tests/samba_tool/passwordsettings.py
 create mode 100644 python/samba/tests/samba_tool/processes.py
 create mode 100644 python/samba/tests/samba_tool/promote_dc_lmdb_size.py
 create mode 100644 python/samba/tests/samba_tool/provision_lmdb_size.py
 create mode 100644 python/samba/tests/samba_tool/provision_password_check.py
 create mode 100644 python/samba/tests/samba_tool/provision_userPassword_crypt.py
 create mode 100644 python/samba/tests/samba_tool/rodc.py
 create mode 100644 python/samba/tests/samba_tool/schema.py
 create mode 100644 python/samba/tests/samba_tool/silo_base.py
 create mode 100644 python/samba/tests/samba_tool/sites.py
 create mode 100644 python/samba/tests/samba_tool/timecmd.py
 create mode 100644 python/samba/tests/samba_tool/user.py
 create mode 100644 python/samba/tests/samba_tool/user_auth_policy.py
 create mode 100644 python/samba/tests/samba_tool/user_auth_silo.py
 create mode 100644 python/samba/tests/samba_tool/user_check_password_script.py
 create mode 100755 python/samba/tests/samba_tool/user_edit.sh
 create mode 100644 python/samba/tests/samba_tool/user_get_kerberos_ticket.py
 create mode 100644 python/samba/tests/samba_tool/user_getpassword_gmsa.py
 create mode 100644 python/samba/tests/samba_tool/user_virtualCryptSHA.py
 create mode 100644 python/samba/tests/samba_tool/user_virtualCryptSHA_base.py
 create mode 100644 python/samba/tests/samba_tool/user_virtualCryptSHA_gpg.py
 create mode 100644 python/samba/tests/samba_tool/user_virtualCryptSHA_userPassword.py
 create mode 100644 python/samba/tests/samba_tool/user_wdigest.py
 create mode 100644 python/samba/tests/samba_tool/visualize.py
 create mode 100644 python/samba/tests/samba_tool/visualize_drs.py
 create mode 100644 python/samba/tests/samba_upgradedns_lmdb.py
 create mode 100644 python/samba/tests/samdb.py
 create mode 100644 python/samba/tests/samdb_api.py
 create mode 100644 python/samba/tests/sddl.py
 create mode 100644 python/samba/tests/sddl_conditional_ace.py
 create mode 100644 python/samba/tests/security.py
 create mode 100644 python/samba/tests/security_descriptors.py
 create mode 100644 python/samba/tests/segfault.py
 create mode 100644 python/samba/tests/sid_strings.py
 create mode 100755 python/samba/tests/smb-notify.py
 create mode 100644 python/samba/tests/smb.py
 create mode 100644 python/samba/tests/smb1posix.py
 create mode 100644 python/samba/tests/smb2symlink.py
 create mode 100644 python/samba/tests/smb3unix.py
 create mode 100644 python/samba/tests/smbconf.py
 create mode 100644 python/samba/tests/smbd_base.py
 create mode 100644 python/samba/tests/smbd_fuzztest.py
 create mode 100644 python/samba/tests/source.py
 create mode 100755 python/samba/tests/source_chars.py
 create mode 100644 python/samba/tests/strings.py
 create mode 100644 python/samba/tests/subunitrun.py
 create mode 100644 python/samba/tests/tdb_util.py
 create mode 100755 python/samba/tests/test_pam_winbind.sh
 create mode 100755 python/samba/tests/test_pam_winbind_chauthtok.sh
 create mode 100755 python/samba/tests/test_pam_winbind_setcred.sh
 create mode 100755 python/samba/tests/test_pam_winbind_warn_pwd_expire.sh
 create mode 100644 python/samba/tests/token_factory.py
 create mode 100644 python/samba/tests/upgrade.py
 create mode 100644 python/samba/tests/upgradeprovision.py
 create mode 100644 python/samba/tests/upgradeprovisionneeddc.py
 create mode 100644 python/samba/tests/usage.py
 create mode 100644 python/samba/tests/xattr.py
 create mode 100644 python/samba/trust_utils.py
 create mode 100644 python/samba/upgrade.py
 create mode 100644 python/samba/upgradehelpers.py
 create mode 100644 python/samba/uptodateness.py
 create mode 100644 python/samba/xattr.py

(limited to 'python/samba')

diff --git a/python/samba/__init__.py b/python/samba/__init__.py
new file mode 100644
index 0000000..3e6ea7d
--- /dev/null
+++ b/python/samba/__init__.py
@@ -0,0 +1,400 @@
+# Unix SMB/CIFS implementation.
+# Copyright (C) Jelmer Vernooij 2007-2008
+#
+# Based on the original in EJS:
+# Copyright (C) Andrew Tridgell 2005
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+#
+
+"""Samba 4."""
+
+__docformat__ = "restructuredText"
+
+import os
+import time
+import ldb
+import samba.param
+from samba import _glue
+from samba._ldb import Ldb as _Ldb
+
+
+def source_tree_topdir():
+    """Return the top level source directory."""
+    paths = ["../../..", "../../../.."]
+    for p in paths:
+        topdir = os.path.normpath(os.path.join(os.path.dirname(__file__), p))
+        if os.path.exists(os.path.join(topdir, 'source4')):
+            return topdir
+    raise RuntimeError("unable to find top level source directory")
+
+
+def in_source_tree():
+    """Return True if we are running from within the samba source tree"""
+    try:
+        topdir = source_tree_topdir()
+    except RuntimeError:
+        return False
+    return True
+
+
+class Ldb(_Ldb):
+    """Simple Samba-specific LDB subclass that takes care
+    of setting up the modules dir, credentials pointers, etc.
+
+    Please note that this is intended to be for all Samba LDB files,
+    not necessarily the Sam database. For Sam-specific helper
+    functions see samdb.py.
+    """
+
+    def __init__(self, url=None, lp=None, modules_dir=None, session_info=None,
+                 credentials=None, flags=0, options=None):
+        """Opens a Samba Ldb file.
+
+        :param url: Optional LDB URL to open
+        :param lp: Optional loadparm object
+        :param modules_dir: Optional modules directory
+        :param session_info: Optional session information
+        :param credentials: Optional credentials, defaults to anonymous.
+        :param flags: Optional LDB flags
+        :param options: Additional options (optional)
+
+        This is different from a regular Ldb file in that the Samba-specific
+        modules-dir is used by default and that credentials and session_info
+        can be passed through (required by some modules).
+ """ + + if modules_dir is not None: + self.set_modules_dir(modules_dir) + else: + self.set_modules_dir(os.path.join(samba.param.modules_dir(), "ldb")) + + if session_info is not None: + self.set_session_info(session_info) + + if credentials is not None: + self.set_credentials(credentials) + + if lp is not None: + self.set_loadparm(lp) + + # This must be done before we load the schema, as these handlers for + # objectSid and objectGUID etc must take precedence over the 'binary + # attribute' declaration in the schema + self.register_samba_handlers() + + # TODO set debug + def msg(l, text): + print(text) + # self.set_debug(msg) + + self.set_utf8_casefold() + + # Allow admins to force non-sync ldb for all databases + if lp is not None: + nosync_p = lp.get("ldb:nosync") + if nosync_p is not None and nosync_p: + flags |= ldb.FLG_NOSYNC + + self.set_create_perms(0o600) + + if url is not None: + self.connect(url, flags, options) + + def searchone(self, attribute, basedn=None, expression=None, + scope=ldb.SCOPE_BASE): + """Search for one attribute as a string. + + :param basedn: BaseDN for the search. + :param attribute: Name of the attribute + :param expression: Optional search expression. + :param scope: Search scope (defaults to base). + :return: Value of attribute as a string or None if it wasn't found. + """ + res = self.search(basedn, scope, expression, [attribute]) + if len(res) != 1 or res[0][attribute] is None: + return None + values = set(res[0][attribute]) + assert len(values) == 1 + return self.schema_format_value(attribute, values.pop()) + + def erase_users_computers(self, dn): + """Erases user and computer objects from our AD. + + This is needed since the 'samldb' module denies the deletion of primary + groups. Therefore all groups shouldn't be primary somewhere anymore. + """ + + try: + res = self.search(base=dn, scope=ldb.SCOPE_SUBTREE, attrs=[], + expression="(|(objectclass=user)(objectclass=computer))") + except ldb.LdbError as error: + (errno, estr) = error.args + if errno == ldb.ERR_NO_SUCH_OBJECT: + # Ignore no such object errors + return + else: + raise + + try: + for msg in res: + self.delete(msg.dn, ["relax:0"]) + except ldb.LdbError as error: + (errno, estr) = error.args + if errno != ldb.ERR_NO_SUCH_OBJECT: + # Ignore no such object errors + raise + + def erase_except_schema_controlled(self): + """Erase this ldb. + + :note: Removes all records, except those that are controlled by + Samba4's schema. 
+ """ + + basedn = "" + + # Try to delete user/computer accounts to allow deletion of groups + self.erase_users_computers(basedn) + + # Delete the 'visible' records, and the invisible 'deleted' records (if + # this DB supports it) + for msg in self.search(basedn, ldb.SCOPE_SUBTREE, + "(&(|(objectclass=*)(distinguishedName=*))(!(distinguishedName=@BASEINFO)))", + [], controls=["show_deleted:0", "show_recycled:0"]): + try: + self.delete(msg.dn, ["relax:0"]) + except ldb.LdbError as error: + (errno, estr) = error.args + if errno != ldb.ERR_NO_SUCH_OBJECT: + # Ignore no such object errors + raise + + res = self.search(basedn, ldb.SCOPE_SUBTREE, + "(&(|(objectclass=*)(distinguishedName=*))(!(distinguishedName=@BASEINFO)))", + [], controls=["show_deleted:0", "show_recycled:0"]) + assert len(res) == 0 + + # delete the specials + for attr in ["@SUBCLASSES", "@MODULES", + "@OPTIONS", "@PARTITION", "@KLUDGEACL"]: + try: + self.delete(attr, ["relax:0"]) + except ldb.LdbError as error: + (errno, estr) = error.args + if errno != ldb.ERR_NO_SUCH_OBJECT: + # Ignore missing dn errors + raise + + def erase(self): + """Erase this ldb, removing all records.""" + self.erase_except_schema_controlled() + + # delete the specials + for attr in ["@INDEXLIST", "@ATTRIBUTES"]: + try: + self.delete(attr, ["relax:0"]) + except ldb.LdbError as error: + (errno, estr) = error.args + if errno != ldb.ERR_NO_SUCH_OBJECT: + # Ignore missing dn errors + raise + + def load_ldif_file_add(self, ldif_path): + """Load a LDIF file. + + :param ldif_path: Path to LDIF file. + """ + with open(ldif_path, 'r') as ldif_file: + self.add_ldif(ldif_file.read()) + + def add_ldif(self, ldif, controls=None): + """Add data based on a LDIF string. + + :param ldif: LDIF text. + """ + for changetype, msg in self.parse_ldif(ldif): + assert changetype == ldb.CHANGETYPE_NONE + self.add(msg, controls) + + def modify_ldif(self, ldif, controls=None): + """Modify database based on a LDIF string. + + :param ldif: LDIF text. + """ + for changetype, msg in self.parse_ldif(ldif): + if changetype == ldb.CHANGETYPE_NONE: + changetype = ldb.CHANGETYPE_MODIFY + + if changetype == ldb.CHANGETYPE_ADD: + self.add(msg, controls) + elif changetype == ldb.CHANGETYPE_MODIFY: + self.modify(msg, controls) + elif changetype == ldb.CHANGETYPE_DELETE: + deldn = msg + self.delete(deldn, controls) + elif changetype == ldb.CHANGETYPE_MODRDN: + olddn = msg["olddn"] + deleteoldrdn = msg["deleteoldrdn"] + newdn = msg["newdn"] + if deleteoldrdn is False: + raise ValueError("Invalid ldb.CHANGETYPE_MODRDN with deleteoldrdn=False") + self.rename(olddn, newdn, controls) + else: + raise ValueError("Invalid ldb.CHANGETYPE_%u: %s" % (changetype, msg)) + + +def substitute_var(text, values): + """Substitute strings of the form ${NAME} in str, replacing + with substitutions from values. + + :param text: Text in which to substitute. + :param values: Dictionary with keys and values. + """ + + for (name, value) in values.items(): + assert isinstance(name, str), "%r is not a string" % name + assert isinstance(value, str), "Value %r for %s is not a string" % (value, name) + text = text.replace("${%s}" % name, value) + + return text + + +def check_all_substituted(text): + """Check that all substitution variables in a string have been replaced. + + If not, raise an exception. 
+
+    :param text: The text to search for substitution variables.
+    """
+    if "${" not in text:
+        return
+
+    var_start = text.find("${")
+    var_end = text.find("}", var_start)
+
+    raise Exception("Not all variables substituted: %s" %
+                    text[var_start:var_end + 1])
+
+
+def read_and_sub_file(file_name, subst_vars):
+    """Read a file and substitute the variables found in it.
+
+    :param file_name: File to be read (typically from the setup directory).
+    :param subst_vars: Optional variables to substitute in the file.
+    """
+    with open(file_name, 'r', encoding="utf-8") as data_file:
+        data = data_file.read()
+        if subst_vars is not None:
+            data = substitute_var(data, subst_vars)
+            check_all_substituted(data)
+    return data
+
+
+def setup_file(template, fname, subst_vars=None):
+    """Set up a file in the private dir.
+
+    :param template: Path of the template file.
+    :param fname: Path of the file to create.
+    :param subst_vars: Substitution variables.
+    """
+    if os.path.exists(fname):
+        os.unlink(fname)
+
+    data = read_and_sub_file(template, subst_vars)
+    with open(fname, 'w') as f:
+        f.write(data)
+
+
+MAX_NETBIOS_NAME_LEN = 15
+
+
+def is_valid_netbios_char(c):
+    return (c.isalnum() or c in " !#$%&'()-.@^_{}~")
+
+
+def valid_netbios_name(name):
+    """Check whether a name is valid as a NetBIOS name."""
+    # See crh's book (1.4.1.1)
+    if len(name) > MAX_NETBIOS_NAME_LEN:
+        return False
+    for x in name:
+        if not is_valid_netbios_char(x):
+            return False
+    return True
+
+
+def dn_from_dns_name(dnsdomain):
+    """Return a DN from a DNS name (domain/forest root)."""
+    return "DC=" + ",DC=".join(dnsdomain.split("."))
+
+
+def current_unix_time():
+    return int(time.time())
+
+
+def string_to_byte_array(string):
+    return [c if isinstance(c, int) else ord(c) for c in string]
+
+
+def arcfour_encrypt(key, data):
+    from samba.crypto import arcfour_crypt_blob
+    return arcfour_crypt_blob(data, key)
+
+
+def enable_net_export_keytab():
+    """This function modifies the samba.net.Net class to contain
+    an export_keytab() method."""
+    # This looks very strange because it is.
+    #
+    # The dckeytab module contains nothing, but the act of importing
+    # it pushes a method into samba.net.Net. It ended up this way
+    # because Net.export_keytab() only works on Heimdal builds, and
+    # people sometimes want to compile Samba without Heimdal while
+    # still having a working samba-tool.
+    #
+    # There is probably a better way to do this than a magic module
+    # import (yes, that's a FIXME if you can be bothered).
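+    #
+    # Illustrative usage sketch (assumes a Heimdal build; the creds/lp
+    # objects and the keytab path are placeholders):
+    #
+    #   enable_net_export_keytab()
+    #   from samba.net import Net
+    #   Net(creds, lp).export_keytab(keytab="/tmp/example.keytab")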
+    from samba import net
+    from samba import dckeytab
+
+
+version = _glue.version
+interface_ips = _glue.interface_ips
+fault_setup = _glue.fault_setup
+set_debug_level = _glue.set_debug_level
+get_debug_level = _glue.get_debug_level
+float2nttime = _glue.float2nttime
+nttime2float = _glue.nttime2float
+nttime2string = _glue.nttime2string
+nttime2unix = _glue.nttime2unix
+unix2nttime = _glue.unix2nttime
+generate_random_password = _glue.generate_random_password
+generate_random_machine_password = _glue.generate_random_machine_password
+check_password_quality = _glue.check_password_quality
+generate_random_bytes = _glue.generate_random_bytes
+strcasecmp_m = _glue.strcasecmp_m
+strstr_m = _glue.strstr_m
+is_ntvfs_fileserver_built = _glue.is_ntvfs_fileserver_built
+is_heimdal_built = _glue.is_heimdal_built
+is_ad_dc_built = _glue.is_ad_dc_built
+is_selftest_enabled = _glue.is_selftest_enabled
+
+NTSTATUSError = _glue.NTSTATUSError
+HRESULTError = _glue.HRESULTError
+WERRORError = _glue.WERRORError
+DsExtendedError = _glue.DsExtendedError
diff --git a/python/samba/auth_util.py b/python/samba/auth_util.py
new file mode 100644
index 0000000..f616bb4
--- /dev/null
+++ b/python/samba/auth_util.py
@@ -0,0 +1,34 @@
+# Unix SMB/CIFS implementation.
+# auth util helpers
+#
+# Copyright (C) Ralph Boehme 2019
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program.  If not, see <http://www.gnu.org/licenses/>.
+
+from samba.auth import (
+    system_session,
+    session_info_fill_unix,
+    copy_session_info,
+)
+
+def system_session_unix():
+    """
+    Return a copy of the system session_info with a valid UNIX token
+    """
+
+    session_info = system_session()
+    session_info_unix = copy_session_info(session_info)
+    session_info_fill_unix(session_info_unix, None)
+
+    return session_info_unix
diff --git a/python/samba/colour.py b/python/samba/colour.py
new file mode 100644
index 0000000..1fb6f24
--- /dev/null
+++ b/python/samba/colour.py
@@ -0,0 +1,175 @@
+# ANSI codes for 4 bit and xterm-256color
+#
+# Copyright (C) Andrew Bartlett 2018
+#
+# Originally written by Douglas Bagnall
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program.  If not, see <http://www.gnu.org/licenses/>.
+#
+# The 4 bit colours are available as global variables with names like
+# RED, DARK_RED, REV_RED (for red background), and REV_DARK_RED. If
+# switch_colour_off() is called, these names will all point to the
+# empty string. switch_colour_on() restores the default values.
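+#
+# For example (illustrative):
+#
+#   c_RED("hi")        # '\033[1;31mhi\033[0m' while colour is on
+#   switch_colour_off()
+#   c_RED("hi")        # 'hi'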
+#
+# The 256-colour codes are obtained using xterm_256_colour(n), where n
+# is the number of the desired colour.
+
+
+def _gen_ansi_colours():
+    g = globals()
+    for i, name in enumerate(('BLACK', 'RED', 'GREEN', 'YELLOW', 'BLUE',
+                              'MAGENTA', 'CYAN', 'WHITE')):
+        g[name] = "\033[1;3%dm" % i
+        g['DARK_' + name] = "\033[3%dm" % i
+        g['REV_' + name] = "\033[1;4%dm" % i
+        g['REV_DARK_' + name] = "\033[4%dm" % i
+
+    # kcc.debug uses these aliases (which make visual sense)
+    g['PURPLE'] = DARK_MAGENTA
+    g['GREY'] = DARK_WHITE
+
+    # C_NORMAL resets to normal, whatever that is
+    g['C_NORMAL'] = "\033[0m"
+
+    # Non-colour ANSI codes.
+    g['UNDERLINE'] = "\033[4m"
+
+
+_gen_ansi_colours()
+
+# Generate functions that colour a string. The functions look like
+# this:
+#
+# c_BLUE("hello")  # "\033[1;34mhello\033[0m" -> blue text
+# c_DARK_RED(3)    # 3 will be stringified and coloured
+#
+# but if colour is switched off, no colour codes are added.
+#
+# c_BLUE("hello")  # "hello"
+#
+# The definition of the functions looks a little odd, because we want
+# to bake in the name of the colour but not its actual value.
+
+for _k in list(globals().keys()):
+    if _k.isupper():
+        def _f(s, name=_k):
+            return "%s%s%s" % (globals()[name], s, C_NORMAL)
+        globals()['c_%s' % _k] = _f
+
+del _k, _f
+
+
+def switch_colour_off():
+    """Convert all the ANSI colour codes into empty strings."""
+    g = globals()
+    for k, v in list(g.items()):
+        if k.isupper() and isinstance(v, str) and v.startswith('\033'):
+            g[k] = ''
+
+
+def switch_colour_on():
+    """Regenerate all the ANSI colour codes."""
+    _gen_ansi_colours()
+
+
+def xterm_256_colour(n, bg=False, bold=False):
+    weight = '01;' if bold else ''
+    target = '48' if bg else '38'
+
+    return "\033[%s%s;5;%dm" % (weight, target, int(n))
+
+
+def is_colour_wanted(*streams, hint='auto'):
+    """The hint is presumably a --color argument.
+
+    The streams to be considered can be file objects or file names,
+    with '-' being a special filename indicating stdout.
+
+    We follow the behaviour of GNU `ls` in what we accept.
+    * `git` is stricter, accepting only {always,never,auto}.
+    * `grep` is looser, accepting mixed case variants.
+    * historically we have used {yes,no,auto}.
+    * {always,never,auto} appears the commonest convention.
+    * if the caller tries to opt out of choosing and sets hint to None
+      or '', we assume 'auto'.
+    """
+    if hint in ('no', 'never', 'none'):
+        return False
+
+    if hint in ('yes', 'always', 'force'):
+        return True
+
+    if hint not in ('auto', 'tty', 'if-tty', None, ''):
+        raise ValueError(f"unexpected colour hint: {hint}; "
+                         "try always|never|auto")
+
+    from os import environ
+    if environ.get('NO_COLOR'):
+        # Note: per spec, we treat the empty string as if unset.
+        return False
+
+    for stream in streams:
+        if isinstance(stream, str):
+            # This function can be passed filenames instead of file
+            # objects, in which case we treat '-' as stdout, and test
+            # that. Any other string is not regarded as a tty.
+            if stream != '-':
+                return False
+            import sys
+            stream = sys.stdout
+
+        if not stream.isatty():
+            return False
+    return True
+
+
+def colour_if_wanted(*streams, hint='auto'):
+    wanted = is_colour_wanted(*streams, hint=hint)
+    if wanted:
+        switch_colour_on()
+    else:
+        switch_colour_off()
+    return wanted
+
+
+def colourdiff(a, b):
+    """Generate a string comparing two strings or byte sequences, with
+    differences coloured to indicate what changed.
+
+    Byte sequences are printed as hex pairs separated by colons.
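+
+    For example (illustrative), colourdiff('abc', 'abd') leaves 'ab'
+    unchanged, colours the old 'c' red and the new 'd' green.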
+ """ + from difflib import SequenceMatcher + out = [] + if isinstance(a, bytes): + a = a.hex(':') + if isinstance(b, bytes): + b = b.hex(':') + a = a.replace(' ', '␠') + b = b.replace(' ', '␠') + + s = SequenceMatcher(None, a, b) + for op, al, ar, bl, br in s.get_opcodes(): + if op == 'equal': + out.append(a[al: ar]) + elif op == 'delete': + out.append(c_RED(a[al: ar])) + elif op == 'insert': + out.append(c_GREEN(b[bl: br])) + elif op == 'replace': + out.append(c_RED(a[al: ar])) + out.append(c_GREEN(b[bl: br])) + else: + out.append(f' --unknown diff op {op}!-- ') + + return ''.join(out) diff --git a/python/samba/common.py b/python/samba/common.py new file mode 100644 index 0000000..c46f6cb --- /dev/null +++ b/python/samba/common.py @@ -0,0 +1,107 @@ +# Samba common functions +# +# Copyright (C) Matthieu Patou +# Copyright (C) Lumir Balhar 2017 +# +# This program is free software; you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation; either version 3 of the License, or +# (at your option) any later version. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with this program. If not, see . +# + + +def cmp(x, y): + """ + Replacement for built-in function cmp that was removed in Python 3 + + Compare the two objects x and y and return an integer according to + the outcome. The return value is negative if x < y, zero if x == y + and strictly positive if x > y. + """ + + return (x > y) - (x < y) + + +def confirm(msg, forced=False, allow_all=False): + """confirm an action with the user + + :param msg: A string to print to the user + :param forced: Are the answer forced + """ + if forced: + print("%s [YES]" % msg) + return True + + mapping = { + 'Y': True, + 'YES': True, + '': False, + 'N': False, + 'NO': False, + } + + prompt = '[y/N]' + + if allow_all: + mapping['ALL'] = 'ALL' + mapping['NONE'] = 'NONE' + prompt = '[y/N/all/none]' + + while True: + v = input(msg + ' %s ' % prompt) + v = v.upper() + if v in mapping: + return mapping[v] + print("Unknown response '%s'" % v) + + +def normalise_int32(ivalue): + """normalise a ldap integer to signed 32 bit""" + if int(ivalue) & 0x80000000 and int(ivalue) > 0: + return str(int(ivalue) - 0x100000000) + return str(ivalue) + + +# Sometimes in PY3 we have variables whose content can be 'bytes' or +# 'str' and we can't be sure which. Generally this is because the +# code variable can be initialised (or reassigned) a value from different +# api(s) or functions depending on complex conditions or logic. Or another +# common case is in PY2 the variable is 'type ' and in PY3 it is +# 'class ' and the function to use e.g. b64encode requires 'bytes' +# in PY3. In such cases it would be nice to avoid excessive testing in +# the client code. Calling such a helper function should be avoided +# if possible but sometimes this just isn't possible. +# If a 'str' object is passed in it is encoded using 'utf8' or if 'bytes' +# is passed in it is returned unchanged. 
+# Using this function in PY2/PY3 code should ensure that in most cases
+# the code runs unchanged in PY2, whereas in PY3 the variable is
+# encoded if necessary.
+def get_bytes(bytesorstring):
+    tmp = bytesorstring
+    if isinstance(bytesorstring, str):
+        tmp = bytesorstring.encode('utf8')
+    elif not isinstance(bytesorstring, bytes):
+        raise ValueError('Expected bytes or string for %s:%s' % (type(bytesorstring), bytesorstring))
+    return tmp
+
+# Helper function to get a string from a variable that may be 'str' or
+# 'bytes'; if 'bytes' it is decoded using 'utf8', while a 'str' is
+# returned unchanged.
+# Using this function in PY2/PY3 code should ensure that in most cases
+# the code runs unchanged in PY2, whereas in PY3 the variable is
+# decoded if necessary.
def get_string(bytesorstring):
+    tmp = bytesorstring
+    if isinstance(bytesorstring, bytes):
+        tmp = bytesorstring.decode('utf8')
+    elif not isinstance(bytesorstring, str):
+        raise ValueError('Expected bytes or string for %s:%s' % (type(bytesorstring), bytesorstring))
+    return tmp
diff --git a/python/samba/dbchecker.py b/python/samba/dbchecker.py
new file mode 100644
index 0000000..e07afdc
--- /dev/null
+++ b/python/samba/dbchecker.py
@@ -0,0 +1,2935 @@
+# Samba4 AD database checker
+#
+# Copyright (C) Andrew Tridgell 2011
+# Copyright (C) Matthieu Patou 2011
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program.  If not, see <http://www.gnu.org/licenses/>.
+#
+
+import ldb
+import samba
+import time
+from base64 import b64decode, b64encode
+from samba import dsdb
+from samba import common
+from samba.dcerpc import misc
+from samba.dcerpc import drsuapi
+from samba.ndr import ndr_unpack, ndr_pack
+from samba.dcerpc import drsblobs
+from samba.samdb import dsdb_Dn
+from samba.dcerpc import security
+from samba.descriptor import (
+    get_wellknown_sds,
+    get_deletedobjects_descriptor,
+    get_diff_sds
+)
+from samba.auth import system_session, admin_session
+from samba.netcmd import CommandError
+from samba.netcmd.fsmo import get_fsmo_roleowner
+from samba.colour import c_RED, c_DARK_YELLOW, c_DARK_CYAN, c_DARK_GREEN
+
+def dump_attr_values(vals):
+    """Stringify a value list, using utf-8 if possible (which some tests
+    want), or the python bytes representation otherwise (with leading
+    'b' and escapes like b'\x00').
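+
+    For example, [b'abc', b'\xff'] is rendered as "abc,b'\xff'".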
+ """ + result = [] + for value in vals: + try: + result.append(value.decode('utf-8')) + except UnicodeDecodeError: + result.append(repr(value)) + return ','.join(result) + + +class dbcheck(object): + """check a SAM database for errors""" + + def __init__(self, samdb, samdb_schema=None, verbose=False, fix=False, + yes=False, quiet=False, in_transaction=False, + quick_membership_checks=False, + reset_well_known_acls=False, + check_expired_tombstones=False, + colour=False): + self.samdb = samdb + self.dict_oid_name = None + self.samdb_schema = (samdb_schema or samdb) + self.verbose = verbose + self.fix = fix + self.yes = yes + self.quiet = quiet + self.colour = colour + self.remove_all_unknown_attributes = False + self.remove_all_empty_attributes = False + self.fix_all_normalisation = False + self.fix_all_duplicates = False + self.fix_all_DN_GUIDs = False + self.fix_all_binary_dn = False + self.remove_implausible_deleted_DN_links = False + self.remove_plausible_deleted_DN_links = False + self.fix_all_string_dn_component_mismatch = False + self.fix_all_GUID_dn_component_mismatch = False + self.fix_all_SID_dn_component_mismatch = False + self.fix_all_SID_dn_component_missing = False + self.fix_all_old_dn_string_component_mismatch = False + self.fix_all_metadata = False + self.fix_time_metadata = False + self.fix_undead_linked_attributes = False + self.fix_all_missing_backlinks = False + self.fix_all_orphaned_backlinks = False + self.fix_all_missing_forward_links = False + self.duplicate_link_cache = dict() + self.recover_all_forward_links = False + self.fix_rmd_flags = False + self.fix_ntsecuritydescriptor = False + self.fix_ntsecuritydescriptor_owner_group = False + self.seize_fsmo_role = False + self.move_to_lost_and_found = False + self.fix_instancetype = False + self.fix_replmetadata_zero_invocationid = False + self.fix_replmetadata_duplicate_attid = False + self.fix_replmetadata_wrong_attid = False + self.fix_replmetadata_unsorted_attid = False + self.fix_deleted_deleted_objects = False + self.fix_dn = False + self.fix_base64_userparameters = False + self.fix_utf8_userparameters = False + self.fix_doubled_userparameters = False + self.fix_sid_rid_set_conflict = False + self.quick_membership_checks = quick_membership_checks + self.reset_well_known_acls = reset_well_known_acls + self.check_expired_tombstones = check_expired_tombstones + self.expired_tombstones = 0 + self.reset_all_well_known_acls = False + self.in_transaction = in_transaction + self.infrastructure_dn = ldb.Dn(samdb, "CN=Infrastructure," + samdb.domain_dn()) + self.naming_dn = ldb.Dn(samdb, "CN=Partitions,%s" % samdb.get_config_basedn()) + self.schema_dn = samdb.get_schema_basedn() + self.rid_dn = ldb.Dn(samdb, "CN=RID Manager$,CN=System," + samdb.domain_dn()) + self.ntds_dsa = ldb.Dn(samdb, samdb.get_dsServiceName()) + self.class_schemaIDGUID = {} + self.wellknown_sds = get_wellknown_sds(self.samdb) + self.fix_all_missing_objectclass = False + self.fix_missing_deleted_objects = False + self.fix_replica_locations = False + self.fix_missing_rid_set_master = False + self.fix_changes_after_deletion_bug = False + + self.dn_set = set() + self.link_id_cache = {} + self.name_map = {} + try: + base_dn = "CN=DnsAdmins,%s" % samdb.get_wellknown_dn( + samdb.get_default_basedn(), + dsdb.DS_GUID_USERS_CONTAINER) + res = samdb.search(base=base_dn, scope=ldb.SCOPE_BASE, + attrs=["objectSid"]) + dnsadmins_sid = ndr_unpack(security.dom_sid, res[0]["objectSid"][0]) + self.name_map['DnsAdmins'] = str(dnsadmins_sid) + except ldb.LdbError as e5: 
+ (enum, estr) = e5.args + if enum != ldb.ERR_NO_SUCH_OBJECT: + raise + + self.system_session_info = system_session() + self.admin_session_info = admin_session(None, samdb.get_domain_sid()) + + res = self.samdb.search(base=self.ntds_dsa, scope=ldb.SCOPE_BASE, attrs=['msDS-hasMasterNCs', 'hasMasterNCs']) + if "msDS-hasMasterNCs" in res[0]: + self.write_ncs = res[0]["msDS-hasMasterNCs"] + else: + # If the Forest Level is less than 2003 then there is no + # msDS-hasMasterNCs, so we fall back to hasMasterNCs + # no need to merge as all the NCs that are in hasMasterNCs must + # also be in msDS-hasMasterNCs (but not the opposite) + if "hasMasterNCs" in res[0]: + self.write_ncs = res[0]["hasMasterNCs"] + else: + self.write_ncs = None + + res = self.samdb.search(base="", scope=ldb.SCOPE_BASE, attrs=['namingContexts']) + self.deleted_objects_containers = [] + self.ncs_lacking_deleted_containers = [] + self.dns_partitions = [] + try: + self.ncs = res[0]["namingContexts"] + except KeyError: + pass + except IndexError: + pass + + for nc in self.ncs: + try: + dn = self.samdb.get_wellknown_dn(ldb.Dn(self.samdb, nc.decode('utf8')), + dsdb.DS_GUID_DELETED_OBJECTS_CONTAINER) + self.deleted_objects_containers.append(dn) + except KeyError: + self.ncs_lacking_deleted_containers.append(ldb.Dn(self.samdb, nc.decode('utf8'))) + + domaindns_zone = 'DC=DomainDnsZones,%s' % self.samdb.get_default_basedn() + forestdns_zone = 'DC=ForestDnsZones,%s' % self.samdb.get_root_basedn() + domain = self.samdb.search(scope=ldb.SCOPE_ONELEVEL, + attrs=["msDS-NC-Replica-Locations", "msDS-NC-RO-Replica-Locations"], + base=self.samdb.get_partitions_dn(), + expression="(&(objectClass=crossRef)(ncName=%s))" % domaindns_zone) + if len(domain) == 1: + self.dns_partitions.append((ldb.Dn(self.samdb, forestdns_zone), domain[0])) + + forest = self.samdb.search(scope=ldb.SCOPE_ONELEVEL, + attrs=["msDS-NC-Replica-Locations", "msDS-NC-RO-Replica-Locations"], + base=self.samdb.get_partitions_dn(), + expression="(&(objectClass=crossRef)(ncName=%s))" % forestdns_zone) + if len(forest) == 1: + self.dns_partitions.append((ldb.Dn(self.samdb, domaindns_zone), forest[0])) + + fsmo_dn = ldb.Dn(self.samdb, "CN=RID Manager$,CN=System," + self.samdb.domain_dn()) + rid_master = get_fsmo_roleowner(self.samdb, fsmo_dn, "rid") + if ldb.Dn(self.samdb, self.samdb.get_dsServiceName()) == rid_master: + self.is_rid_master = True + else: + self.is_rid_master = False + + # To get your rid set + # 1. Get server name + res = self.samdb.search(base=ldb.Dn(self.samdb, self.samdb.get_serverName()), + scope=ldb.SCOPE_BASE, attrs=["serverReference"]) + # 2. Get server reference + self.server_ref_dn = ldb.Dn(self.samdb, res[0]['serverReference'][0].decode('utf8')) + + # 3. 
Get RID Set + res = self.samdb.search(base=self.server_ref_dn, + scope=ldb.SCOPE_BASE, attrs=['rIDSetReferences']) + if "rIDSetReferences" in res[0]: + self.rid_set_dn = ldb.Dn(self.samdb, res[0]['rIDSetReferences'][0].decode('utf8')) + else: + self.rid_set_dn = None + + ntds_service_dn = "CN=Directory Service,CN=Windows NT,CN=Services,%s" % \ + self.samdb.get_config_basedn().get_linearized() + res = samdb.search(base=ntds_service_dn, + scope=ldb.SCOPE_BASE, + expression="(objectClass=nTDSService)", + attrs=["tombstoneLifetime"]) + if "tombstoneLifetime" in res[0]: + self.tombstoneLifetime = int(res[0]["tombstoneLifetime"][0]) + else: + self.tombstoneLifetime = 180 + + self.compatibleFeatures = [] + self.requiredFeatures = [] + + try: + res = self.samdb.search(scope=ldb.SCOPE_BASE, + base="@SAMBA_DSDB", + attrs=["compatibleFeatures", + "requiredFeatures"]) + if "compatibleFeatures" in res[0]: + self.compatibleFeatures = res[0]["compatibleFeatures"] + if "requiredFeatures" in res[0]: + self.requiredFeatures = res[0]["requiredFeatures"] + except ldb.LdbError as e6: + (enum, estr) = e6.args + if enum != ldb.ERR_NO_SUCH_OBJECT: + raise + + def check_database(self, DN=None, scope=ldb.SCOPE_SUBTREE, controls=None, + attrs=None): + """perform a database check, returning the number of errors found""" + res = self.samdb.search(base=DN, scope=scope, attrs=['dn'], controls=controls) + self.report('Checking %u objects' % len(res)) + error_count = 0 + self.unfixable_errors = 0 + + error_count += self.check_deleted_objects_containers() + + self.attribute_or_class_ids = set() + + for object in res: + self.dn_set.add(str(object.dn)) + error_count += self.check_object(object.dn, requested_attrs=attrs) + + if DN is None: + error_count += self.check_rootdse() + + if self.expired_tombstones > 0: + self.report("NOTICE: found %d expired tombstones, " + "'samba' will remove them daily, " + "'samba-tool domain tombstones expunge' " + "would do that immediately." % ( + self.expired_tombstones)) + + self.report('Checked %u objects (%u errors)' % + (len(res), error_count + self.unfixable_errors)) + + if self.unfixable_errors != 0: + self.report(f"WARNING: {self.unfixable_errors} " + "of these errors cannot be automatically fixed.") + + if error_count != 0 and not self.fix: + self.report("Please use 'samba-tool dbcheck --fix' to fix " + f"{error_count} errors") + + return error_count + + def check_deleted_objects_containers(self): + """This function only fixes conflicts on the Deleted Objects + containers, not the attributes""" + error_count = 0 + for nc in self.ncs_lacking_deleted_containers: + if nc == self.schema_dn: + continue + error_count += 1 + self.report("ERROR: NC %s lacks a reference to a Deleted Objects container" % nc) + if not self.confirm_all('Fix missing Deleted Objects container for %s?' 
% (nc), 'fix_missing_deleted_objects'): + continue + + dn = ldb.Dn(self.samdb, "CN=Deleted Objects") + dn.add_base(nc) + + conflict_dn = None + try: + # If something already exists here, add a conflict + res = self.samdb.search(base=dn, scope=ldb.SCOPE_BASE, attrs=[], + controls=["show_deleted:1", "extended_dn:1:1", + "show_recycled:1", "reveal_internals:0"]) + if len(res) != 0: + guid = res[0].dn.get_extended_component("GUID") + conflict_dn = ldb.Dn(self.samdb, + "CN=Deleted Objects\\0ACNF:%s" % str(misc.GUID(guid))) + conflict_dn.add_base(nc) + + except ldb.LdbError as e2: + (enum, estr) = e2.args + if enum == ldb.ERR_NO_SUCH_OBJECT: + pass + else: + self.report("Couldn't check for conflicting Deleted Objects container: %s" % estr) + return 1 + + if conflict_dn is not None: + try: + self.samdb.rename(dn, conflict_dn, ["show_deleted:1", "relax:0", "show_recycled:1"]) + except ldb.LdbError as e1: + (enum, estr) = e1.args + self.report("Couldn't move old Deleted Objects placeholder: %s to %s: %s" % (dn, conflict_dn, estr)) + return 1 + + # Refresh wellKnownObjects links + res = self.samdb.search(base=nc, scope=ldb.SCOPE_BASE, + attrs=['wellKnownObjects'], + controls=["show_deleted:1", "extended_dn:0", + "show_recycled:1", "reveal_internals:0"]) + if len(res) != 1: + self.report("wellKnownObjects was not found for NC %s" % nc) + return 1 + + # Prevent duplicate deleted objects containers just in case + wko = res[0]["wellKnownObjects"] + listwko = [] + proposed_objectguid = None + for o in wko: + dsdb_dn = dsdb_Dn(self.samdb, o.decode('utf8'), dsdb.DSDB_SYNTAX_BINARY_DN) + if self.is_deleted_objects_dn(dsdb_dn): + self.report("wellKnownObjects had duplicate Deleted Objects value %s" % o) + # We really want to put this back in the same spot + # as the original one, so that on replication we + # merge, rather than conflict. 
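+                        # (A wellKnownObjects value is a binary DN of the
+                        # form "B:32:<guid-hex>:<dn>", for example
+                        # "B:32:18E2EA80684F11D2B9AA00C04F79F805:CN=Deleted Objects,DC=example,DC=com";
+                        # keeping the GUID lets the re-added link land in
+                        # the original spot.)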
+ proposed_objectguid = dsdb_dn.dn.get_extended_component("GUID") + listwko.append(str(o)) + + if proposed_objectguid is not None: + guid_suffix = "\nobjectGUID: %s" % str(misc.GUID(proposed_objectguid)) + else: + wko_prefix = "B:32:%s" % dsdb.DS_GUID_DELETED_OBJECTS_CONTAINER + listwko.append('%s:%s' % (wko_prefix, dn)) + guid_suffix = "" + + + domain_sid = security.dom_sid(self.samdb.get_domain_sid()) + sec_desc = get_deletedobjects_descriptor(domain_sid, + name_map=self.name_map) + sec_desc_b64 = b64encode(sec_desc).decode('utf8') + + # Insert a brand new Deleted Objects container + self.samdb.add_ldif("""dn: %s +objectClass: top +objectClass: container +description: Container for deleted objects +isDeleted: TRUE +isCriticalSystemObject: TRUE +showInAdvancedViewOnly: TRUE +nTSecurityDescriptor:: %s +systemFlags: -1946157056%s""" % (dn, sec_desc_b64, guid_suffix), + controls=["relax:0", "provision:0"]) + + delta = ldb.Message() + delta.dn = ldb.Dn(self.samdb, str(res[0]["dn"])) + delta["wellKnownObjects"] = ldb.MessageElement(listwko, + ldb.FLAG_MOD_REPLACE, + "wellKnownObjects") + + # Insert the link to the brand new container + if self.do_modify(delta, ["relax:0"], + "NC %s lacks Deleted Objects WKGUID" % nc, + validate=False): + self.report("Added %s well known guid link" % dn) + + self.deleted_objects_containers.append(dn) + + return error_count + + def report(self, msg): + """print a message unless quiet is set""" + if self.quiet: + return + if self.colour: + if msg.startswith('ERROR'): + msg = c_RED('ERROR') + msg[5:] + elif msg.startswith('WARNING'): + msg = c_DARK_YELLOW('WARNING') + msg[7:] + elif msg.startswith('INFO'): + msg = c_DARK_CYAN('INFO') + msg[4:] + elif msg.startswith('NOTICE'): + msg = c_DARK_CYAN('NOTICE') + msg[6:] + elif msg.startswith('NOTE'): + msg = c_DARK_CYAN('NOTE') + msg[4:] + elif msg.startswith('SKIPPING'): + msg = c_DARK_GREEN('SKIPPING') + msg[8:] + + print(msg) + + def confirm(self, msg, allow_all=False, forced=False): + """confirm a change""" + if not self.fix: + return False + if self.quiet: + return self.yes + if self.yes: + forced = True + return common.confirm(msg, forced=forced, allow_all=allow_all) + + ################################################################ + # a local confirm function with support for 'all' + def confirm_all(self, msg, all_attr): + """confirm a change with support for "all" """ + if not self.fix: + return False + if getattr(self, all_attr) == 'NONE': + return False + if getattr(self, all_attr) == 'ALL': + forced = True + else: + forced = self.yes + if self.quiet: + return forced + c = common.confirm(msg, forced=forced, allow_all=True) + if c == 'ALL': + setattr(self, all_attr, 'ALL') + return True + if c == 'NONE': + setattr(self, all_attr, 'NONE') + return False + return c + + def do_delete(self, dn, controls, msg): + """delete dn with optional verbose output""" + if self.verbose: + self.report("delete DN %s" % dn) + try: + controls = controls + ["local_oid:%s:0" % dsdb.DSDB_CONTROL_DBCHECK] + self.samdb.delete(dn, controls=controls) + except Exception as err: + if self.in_transaction: + raise CommandError("%s : %s" % (msg, err)) + self.report("%s : %s" % (msg, err)) + return False + return True + + def do_modify(self, m, controls, msg, validate=True): + """perform a modify with optional verbose output""" + controls = controls + ["local_oid:%s:0" % dsdb.DSDB_CONTROL_DBCHECK] + if self.verbose: + self.report(self.samdb.write_ldif(m, ldb.CHANGETYPE_MODIFY)) + self.report("controls: %r" % controls) + try: + 
self.samdb.modify(m, controls=controls, validate=validate) + except Exception as err: + if self.in_transaction: + raise CommandError("%s : %s" % (msg, err)) + self.report("%s : %s" % (msg, err)) + return False + return True + + def do_rename(self, from_dn, to_rdn, to_base, controls, msg): + """perform a rename with optional verbose output""" + if self.verbose: + self.report("""dn: %s +changeType: modrdn +newrdn: %s +deleteOldRdn: 1 +newSuperior: %s""" % (str(from_dn), str(to_rdn), str(to_base))) + try: + to_dn = to_rdn + to_base + controls = controls + ["local_oid:%s:0" % dsdb.DSDB_CONTROL_DBCHECK] + self.samdb.rename(from_dn, to_dn, controls=controls) + except Exception as err: + if self.in_transaction: + raise CommandError("%s : %s" % (msg, err)) + self.report("%s : %s" % (msg, err)) + return False + return True + + def get_attr_linkID_and_reverse_name(self, attrname): + if attrname in self.link_id_cache: + return self.link_id_cache[attrname] + linkID = self.samdb_schema.get_linkId_from_lDAPDisplayName(attrname) + if linkID: + revname = self.samdb_schema.get_backlink_from_lDAPDisplayName(attrname) + else: + revname = None + self.link_id_cache[attrname] = (linkID, revname) + return linkID, revname + + def err_empty_attribute(self, dn, attrname): + """fix empty attributes""" + self.report("ERROR: Empty attribute %s in %s" % (attrname, dn)) + if not self.confirm_all('Remove empty attribute %s from %s?' % (attrname, dn), 'remove_all_empty_attributes'): + self.report("Not fixing empty attribute %s" % attrname) + return + + m = ldb.Message() + m.dn = dn + m[attrname] = ldb.MessageElement('', ldb.FLAG_MOD_DELETE, attrname) + if self.do_modify(m, ["relax:0", "show_recycled:1"], + "Failed to remove empty attribute %s" % attrname, validate=False): + self.report("Removed empty attribute %s" % attrname) + + def err_normalise_mismatch(self, dn, attrname, values): + """fix attribute normalisation errors, without altering sort order""" + self.report("ERROR: Normalisation error for attribute %s in %s" % (attrname, dn)) + mod_list = [] + for val in values: + normalised = self.samdb.dsdb_normalise_attributes( + self.samdb_schema, attrname, [val]) + if len(normalised) != 1: + self.report("Unable to normalise value '%s'" % val) + mod_list.append((val, '')) + elif (normalised[0] != val): + self.report("value '%s' should be '%s'" % (val, normalised[0])) + mod_list.append((val, normalised[0])) + if not self.confirm_all('Fix normalisation for %s from %s?' % (attrname, dn), 'fix_all_normalisation'): + self.report("Not fixing attribute %s" % attrname) + return + + m = ldb.Message() + m.dn = dn + for i in range(0, len(mod_list)): + (val, nval) = mod_list[i] + m['value_%u' % i] = ldb.MessageElement(val, ldb.FLAG_MOD_DELETE, attrname) + if nval != '': + m['normv_%u' % i] = ldb.MessageElement(nval, ldb.FLAG_MOD_ADD, + attrname) + + if self.do_modify(m, ["relax:0", "show_recycled:1"], + "Failed to normalise attribute %s" % attrname, + validate=False): + self.report("Normalised attribute %s" % attrname) + + def err_normalise_mismatch_replace(self, dn, attrname, values): + """fix attribute normalisation and/or sort errors""" + normalised = self.samdb.dsdb_normalise_attributes(self.samdb_schema, attrname, values) + if list(normalised) == values: + # how we got here is a mystery. + return + self.report("ERROR: Normalisation error for attribute '%s' in '%s'" % (attrname, dn)) + self.report("Values/Order of values do/does not match: %s/%s!" 
% (values, list(normalised))) + if not self.confirm_all("Fix normalisation for '%s' from '%s'?" % (attrname, dn), 'fix_all_normalisation'): + self.report("Not fixing attribute '%s'" % attrname) + return + + m = ldb.Message() + m.dn = dn + m[attrname] = ldb.MessageElement(normalised, ldb.FLAG_MOD_REPLACE, attrname) + + if self.do_modify(m, ["relax:0", "show_recycled:1"], + "Failed to normalise attribute %s" % attrname, + validate=False): + self.report("Normalised attribute %s" % attrname) + + def err_duplicate_values(self, dn, attrname, dup_values, values): + """fix duplicate attribute values""" + self.report("ERROR: Duplicate values for attribute '%s' in '%s'" % (attrname, dn)) + self.report("Values contain a duplicate: [%s]/[%s]!" % + (dump_attr_values(dup_values), dump_attr_values(values))) + if not self.confirm_all("Fix duplicates for '%s' from '%s'?" % (attrname, dn), 'fix_all_duplicates'): + self.report("Not fixing attribute '%s'" % attrname) + return + + m = ldb.Message() + m.dn = dn + m[attrname] = ldb.MessageElement(values, ldb.FLAG_MOD_REPLACE, attrname) + + if self.do_modify(m, ["relax:0", "show_recycled:1"], + "Failed to remove duplicate value on attribute %s" % attrname, + validate=False): + self.report("Removed duplicate value on attribute %s" % attrname) + + def is_deleted_objects_dn(self, dsdb_dn): + """see if a dsdb_Dn is the special Deleted Objects DN""" + return dsdb_dn.prefix == "B:32:%s:" % dsdb.DS_GUID_DELETED_OBJECTS_CONTAINER + + def err_missing_objectclass(self, dn): + """handle object without objectclass""" + self.report("ERROR: missing objectclass in object %s. If you have another working DC, please run 'samba-tool drs replicate --full-sync --local %s'" % (dn, self.samdb.get_nc_root(dn))) + if not self.confirm_all("If you cannot re-sync from another DC, do you wish to delete object '%s'?" 
% dn, 'fix_all_missing_objectclass'): + self.report("Not deleting object with missing objectclass '%s'" % dn) + return + if self.do_delete(dn, ["relax:0"], + "Failed to remove DN %s" % dn): + self.report("Removed DN %s" % dn) + + def err_deleted_dn(self, dn, attrname, val, dsdb_dn, correct_dn, remove_plausible=False): + """handle a DN pointing to a deleted object""" + if not remove_plausible: + self.report("ERROR: target DN is deleted for %s in object %s - %s" % (attrname, dn, val)) + self.report("Target GUID points at deleted DN %r" % str(correct_dn)) + if not self.confirm_all('Remove DN link?', 'remove_implausible_deleted_DN_links'): + self.report("Not removing") + return + else: + self.report("WARNING: target DN is deleted for %s in object %s - %s" % (attrname, dn, val)) + self.report("Target GUID points at deleted DN %r" % str(correct_dn)) + if not self.confirm_all('Remove stale DN link?', 'remove_plausible_deleted_DN_links'): + self.report("Not removing") + return + + m = ldb.Message() + m.dn = dn + m['old_value'] = ldb.MessageElement(val, ldb.FLAG_MOD_DELETE, attrname) + if self.do_modify(m, ["show_recycled:1", + "local_oid:%s:0" % dsdb.DSDB_CONTROL_REPLMD_VANISH_LINKS], + "Failed to remove deleted DN attribute %s" % attrname): + self.report("Removed deleted DN on attribute %s" % attrname) + + def err_missing_target_dn_or_GUID(self, dn, attrname, val, dsdb_dn): + """handle a missing target DN (if specified, GUID form can't be found, + and otherwise DN string form can't be found)""" + + # Don't change anything if the object itself is deleted + if str(dn).find('\\0ADEL') != -1: + # We don't bump the error count as Samba produces these + # in normal operation + self.report("WARNING: no target object found for GUID " + "component link %s in deleted object " + "%s - %s" % (attrname, dn, val)) + self.report("Not removing dangling one-way " + "link on deleted object " + "(tombstone garbage collection in progress?)") + return 0 + + # check if its a backlink + linkID, _ = self.get_attr_linkID_and_reverse_name(attrname) + if (linkID & 1 == 0) and str(dsdb_dn).find('\\0ADEL') == -1: + + linkID, reverse_link_name \ + = self.get_attr_linkID_and_reverse_name(attrname) + if reverse_link_name is not None: + self.report("WARNING: no target object found for GUID " + "component for one-way forward link " + "%s in object " + "%s - %s" % (attrname, dn, val)) + self.report("Not removing dangling forward link") + return 0 + + nc_root = self.samdb.get_nc_root(dn) + try: + target_nc_root = self.samdb.get_nc_root(dsdb_dn.dn) + except ldb.LdbError as e: + (enum, estr) = e.args + if enum != ldb.ERR_NO_SUCH_OBJECT: + raise + target_nc_root = None + + if target_nc_root is None: + # We don't bump the error count as Samba produces + # these in normal operation creating a lab domain (due + # to the way the rename is handled, links to + # now-expunged objects will never be fixed to stay + # inside the NC + self.report("WARNING: no target object found for GUID " + "component for link " + "%s in object to %s outside our NCs" + "%s - %s" % (attrname, dsdb_dn.dn, dn, val)) + self.report("Not removing dangling one-way " + "left-over link outside our NCs " + "(we might be building a renamed/lab domain)") + return 0 + + if nc_root != target_nc_root: + # We don't bump the error count as Samba produces these + # in normal operation + self.report("WARNING: no target object found for GUID " + "component for cross-partition link " + "%s in object " + "%s - %s" % (attrname, dn, val)) + self.report("Not removing dangling one-way " 
+ "cross-partition link " + "(we might be mid-replication)") + return 0 + + # Due to our link handling one-way links pointing to + # missing objects are plausible. + # + # We don't bump the error count as Samba produces these + # in normal operation + self.report("WARNING: no target object found for GUID " + "component for DN value %s in object " + "%s - %s" % (attrname, dn, val)) + self.err_deleted_dn(dn, attrname, val, + dsdb_dn, dsdb_dn, True) + return 0 + + # We bump the error count here, as we should have deleted this + self.report("ERROR: no target object found for GUID " + "component for link %s in object " + "%s - %s" % (attrname, dn, val)) + self.err_deleted_dn(dn, attrname, val, dsdb_dn, dsdb_dn, False) + return 1 + + def err_missing_dn_GUID_component(self, dn, attrname, val, dsdb_dn, errstr): + """handle a missing GUID extended DN component""" + self.report("ERROR: %s component for %s in object %s - %s" % (errstr, attrname, dn, val)) + controls = ["extended_dn:1:1", "show_recycled:1"] + try: + res = self.samdb.search(base=str(dsdb_dn.dn), scope=ldb.SCOPE_BASE, + attrs=[], controls=controls) + except ldb.LdbError as e7: + (enum, estr) = e7.args + self.report("unable to find object for DN %s - (%s)" % (dsdb_dn.dn, estr)) + if enum != ldb.ERR_NO_SUCH_OBJECT: + raise + self.err_missing_target_dn_or_GUID(dn, attrname, val, dsdb_dn) + return + if len(res) == 0: + self.report("unable to find object for DN %s" % dsdb_dn.dn) + self.err_missing_target_dn_or_GUID(dn, attrname, val, dsdb_dn) + return + dsdb_dn.dn = res[0].dn + + if not self.confirm_all('Change DN to %s?' % str(dsdb_dn), 'fix_all_DN_GUIDs'): + self.report("Not fixing %s" % errstr) + return + m = ldb.Message() + m.dn = dn + m['old_value'] = ldb.MessageElement(val, ldb.FLAG_MOD_DELETE, attrname) + m['new_value'] = ldb.MessageElement(str(dsdb_dn), ldb.FLAG_MOD_ADD, attrname) + + if self.do_modify(m, ["show_recycled:1"], + "Failed to fix %s on attribute %s" % (errstr, attrname)): + self.report("Fixed %s on attribute %s" % (errstr, attrname)) + + def err_incorrect_binary_dn(self, dn, attrname, val, dsdb_dn, errstr): + """handle an incorrect binary DN component""" + self.report("ERROR: %s binary component for %s in object %s - %s" % (errstr, attrname, dn, val)) + + if not self.confirm_all('Change DN to %s?' % str(dsdb_dn), 'fix_all_binary_dn'): + self.report("Not fixing %s" % errstr) + return + m = ldb.Message() + m.dn = dn + m['old_value'] = ldb.MessageElement(val, ldb.FLAG_MOD_DELETE, attrname) + m['new_value'] = ldb.MessageElement(str(dsdb_dn), ldb.FLAG_MOD_ADD, attrname) + + if self.do_modify(m, ["show_recycled:1"], + "Failed to fix %s on attribute %s" % (errstr, attrname)): + self.report("Fixed %s on attribute %s" % (errstr, attrname)) + + def err_dn_string_component_old(self, dn, attrname, val, dsdb_dn, correct_dn): + """handle a DN string being incorrect due to a rename or delete""" + self.report("NOTE: old (due to rename or delete) DN string component for %s in object %s - %s" % (attrname, dn, val)) + dsdb_dn.dn = correct_dn + + if not self.confirm_all('Change DN to %s?' 
+                                % str(dsdb_dn),
+                                'fix_all_old_dn_string_component_mismatch'):
+            self.report("Not fixing old string component")
+            return
+        m = ldb.Message()
+        m.dn = dn
+        m['old_value'] = ldb.MessageElement(val, ldb.FLAG_MOD_DELETE, attrname)
+        m['new_value'] = ldb.MessageElement(str(dsdb_dn), ldb.FLAG_MOD_ADD, attrname)
+        if self.do_modify(m, ["show_recycled:1",
+                              "local_oid:%s:1" % dsdb.DSDB_CONTROL_DBCHECK_FIX_LINK_DN_NAME],
+                          "Failed to fix old DN string on attribute %s" % (attrname)):
+            self.report("Fixed old DN string on attribute %s" % (attrname))
+
+    def err_dn_component_target_mismatch(self, dn, attrname, val, dsdb_dn, correct_dn, mismatch_type):
+        """handle a DN string being incorrect"""
+        self.report("ERROR: incorrect DN %s component for %s in object %s - %s" % (mismatch_type, attrname, dn, val))
+        dsdb_dn.dn = correct_dn
+
+        if not self.confirm_all('Change DN to %s?' % str(dsdb_dn),
+                                'fix_all_%s_dn_component_mismatch' % mismatch_type):
+            self.report("Not fixing %s component mismatch" % mismatch_type)
+            return
+        m = ldb.Message()
+        m.dn = dn
+        m['old_value'] = ldb.MessageElement(val, ldb.FLAG_MOD_DELETE, attrname)
+        m['new_value'] = ldb.MessageElement(str(dsdb_dn), ldb.FLAG_MOD_ADD, attrname)
+        if self.do_modify(m, ["show_recycled:1"],
+                          "Failed to fix incorrect DN %s on attribute %s" % (mismatch_type, attrname)):
+            self.report("Fixed incorrect DN %s on attribute %s" % (mismatch_type, attrname))
+
+    def err_dn_component_missing_target_sid(self, dn, attrname, val, dsdb_dn, target_sid_blob):
+        """fix a missing <SID=...> component on linked attributes"""
+        self.report("ERROR: missing DN SID component for %s in object %s - %s" % (attrname, dn, val))
+
+        if len(dsdb_dn.prefix) != 0:
+            self.report("Not fixing missing DN SID on DN+BINARY or DN+STRING")
+            return
+
+        correct_dn = ldb.Dn(self.samdb, dsdb_dn.dn.extended_str())
+        correct_dn.set_extended_component("SID", target_sid_blob)
+
+        if not self.confirm_all('Change DN to %s?'
+                                % correct_dn.extended_str(),
+                                'fix_all_SID_dn_component_missing'):
+            self.report("Not fixing missing DN SID component")
+            return
+
+        target_guid_blob = correct_dn.get_extended_component("GUID")
+        guid_sid_dn = ldb.Dn(self.samdb, "")
+        guid_sid_dn.set_extended_component("GUID", target_guid_blob)
+        guid_sid_dn.set_extended_component("SID", target_sid_blob)
+
+        m = ldb.Message()
+        m.dn = dn
+        m['new_value'] = ldb.MessageElement(guid_sid_dn.extended_str(), ldb.FLAG_MOD_ADD, attrname)
+        controls = [
+            "show_recycled:1",
+            "local_oid:%s:1" % dsdb.DSDB_CONTROL_DBCHECK_FIX_LINK_DN_SID
+        ]
+        if self.do_modify(m, controls,
+                          "Failed to ADD missing DN SID on attribute %s" % (attrname)):
+            self.report("Fixed missing DN SID on attribute %s" % (attrname))
+
+    def err_unknown_attribute(self, obj, attrname):
+        """handle an unknown attribute error"""
+        self.report("ERROR: unknown attribute '%s' in %s" % (attrname, obj.dn))
+        if not self.confirm_all('Remove unknown attribute %s' % attrname, 'remove_all_unknown_attributes'):
+            self.report("Not removing %s" % attrname)
+            return
+        m = ldb.Message()
+        m.dn = obj.dn
+        m['old_value'] = ldb.MessageElement([], ldb.FLAG_MOD_DELETE, attrname)
+        if self.do_modify(m, ["relax:0", "show_recycled:1"],
+                          "Failed to remove unknown attribute %s" % attrname):
+            self.report("Removed unknown attribute %s" % (attrname))
+
+    def err_undead_linked_attribute(self, obj, attrname, val):
+        """handle a link that should not be there on a deleted object"""
+        self.report("ERROR: linked attribute '%s' to '%s' is present on "
+                    "deleted object %s" % (attrname, val, obj.dn))
+        if not self.confirm_all('Remove linked attribute %s' % attrname, 'fix_undead_linked_attributes'):
+            self.report("Not removing linked attribute %s" % attrname)
+            return
+        m = ldb.Message()
+        m.dn = obj.dn
+        m['old_value'] = ldb.MessageElement(val, ldb.FLAG_MOD_DELETE, attrname)
+
+        if self.do_modify(m, ["show_recycled:1", "show_deleted:1", "reveal_internals:0",
+                              "local_oid:%s:0" % dsdb.DSDB_CONTROL_REPLMD_VANISH_LINKS],
+                          "Failed to delete forward link %s" % attrname):
+            self.report("Fixed undead forward link %s" % (attrname))
+
+    def err_missing_backlink(self, obj, attrname, val, backlink_name, target_dn):
+        """handle a missing backlink value"""
+        self.report("ERROR: missing backlink attribute '%s' in %s for link %s in %s" % (backlink_name, target_dn, attrname, obj.dn))
+        if not self.confirm_all('Fix missing backlink %s' % backlink_name, 'fix_all_missing_backlinks'):
+            self.report("Not fixing missing backlink %s" % backlink_name)
+            return
+        m = ldb.Message()
+        m.dn = target_dn
+        m['new_value'] = ldb.MessageElement(val, ldb.FLAG_MOD_ADD, backlink_name)
+        if self.do_modify(m, ["show_recycled:1", "relax:0"],
+                          "Failed to fix missing backlink %s" % backlink_name):
+            self.report("Fixed missing backlink %s" % (backlink_name))
+
+    def err_incorrect_rmd_flags(self, obj, attrname, revealed_dn):
+        """handle an incorrect RMD_FLAGS value"""
+        rmd_flags = int(revealed_dn.dn.get_extended_component("RMD_FLAGS"))
+        self.report("ERROR: incorrect RMD_FLAGS value %u for attribute '%s' in %s for link %s" % (rmd_flags, attrname, obj.dn, revealed_dn.dn.extended_str()))
+        if not self.confirm_all('Fix incorrect RMD_FLAGS %u' % rmd_flags, 'fix_rmd_flags'):
+            self.report("Not fixing incorrect RMD_FLAGS %u" % rmd_flags)
+            return
+        m = ldb.Message()
+        m.dn = obj.dn
+        m['old_value'] = ldb.MessageElement(str(revealed_dn), ldb.FLAG_MOD_DELETE, attrname)
+        if self.do_modify(m, ["show_recycled:1", "reveal_internals:0", "show_deleted:0"],
+                          "Failed to fix incorrect RMD_FLAGS %u" % rmd_flags):
+            self.report("Fixed incorrect RMD_FLAGS %u" % (rmd_flags))
+
+    def err_orphaned_backlink(self, obj_dn, backlink_attr, backlink_val,
+                              target_dn, forward_attr, forward_syntax,
+                              check_duplicates=True):
+        """handle an orphaned backlink value"""
+        if check_duplicates is True and self.has_duplicate_links(target_dn, forward_attr, forward_syntax):
+            self.report("WARNING: Keep orphaned backlink attribute " +
+                        "'%s' in '%s' for link '%s' in '%s'" % (
+                            backlink_attr, obj_dn, forward_attr, target_dn))
+            return
+        self.report("ERROR: orphaned backlink attribute '%s' in %s for link %s in %s" % (backlink_attr, obj_dn, forward_attr, target_dn))
+        if not self.confirm_all('Remove orphaned backlink %s' % backlink_attr, 'fix_all_orphaned_backlinks'):
+            self.report("Not removing orphaned backlink %s" % backlink_attr)
+            return
+        m = ldb.Message()
+        m.dn = obj_dn
+        m['value'] = ldb.MessageElement(backlink_val, ldb.FLAG_MOD_DELETE, backlink_attr)
+        if self.do_modify(m, ["show_recycled:1", "relax:0"],
+                          "Failed to fix orphaned backlink %s" % backlink_attr):
+            self.report("Fixed orphaned backlink %s" % (backlink_attr))
+
+    def err_recover_forward_links(self, obj, forward_attr, forward_vals):
+        """handle duplicate link values"""
+
+        self.report("RECHECK: 'Missing/Duplicate/Correct link' lines above for attribute '%s' in '%s'" % (forward_attr, obj.dn))
+
+        if not self.confirm_all("Commit fixes for (missing/duplicate) forward links in attribute '%s'" % forward_attr, 'recover_all_forward_links'):
+            self.report("Not fixing corrupted (missing/duplicate) forward links in attribute '%s' of '%s'" % (
+                        forward_attr, obj.dn))
+            return
+        m = ldb.Message()
+        m.dn = obj.dn
+        m['value'] = ldb.MessageElement(forward_vals, ldb.FLAG_MOD_REPLACE, forward_attr)
+        if self.do_modify(m, ["local_oid:%s:1" % dsdb.DSDB_CONTROL_DBCHECK_FIX_DUPLICATE_LINKS],
+                          "Failed to fix duplicate links in attribute '%s'" % forward_attr):
+            self.report("Fixed duplicate links in attribute '%s'" % (forward_attr))
+            duplicate_cache_key = "%s:%s" % (str(obj.dn), forward_attr)
+            assert duplicate_cache_key in self.duplicate_link_cache
+            self.duplicate_link_cache[duplicate_cache_key] = False
+
+    def err_no_fsmoRoleOwner(self, obj):
+        """handle a missing fSMORoleOwner"""
+        self.report("ERROR: fSMORoleOwner not found for role %s" % (obj.dn))
+        res = self.samdb.search("",
+                                scope=ldb.SCOPE_BASE, attrs=["dsServiceName"])
+        assert len(res) == 1
+        serviceName = str(res[0]["dsServiceName"][0])
+        if not self.confirm_all('Seize role %s onto current DC by adding fSMORoleOwner=%s' % (obj.dn, serviceName), 'seize_fsmo_role'):
+            self.report("Not seizing role %s onto current DC by adding fSMORoleOwner=%s" % (obj.dn, serviceName))
+            return
+        m = ldb.Message()
+        m.dn = obj.dn
+        m['value'] = ldb.MessageElement(serviceName, ldb.FLAG_MOD_ADD, 'fSMORoleOwner')
+        if self.do_modify(m, [],
+                          "Failed to seize role %s onto current DC by adding fSMORoleOwner=%s" % (obj.dn, serviceName)):
+            self.report("Seized role %s onto current DC by adding fSMORoleOwner=%s" % (obj.dn, serviceName))
+
+    def err_missing_parent(self, obj):
+        """handle a missing parent"""
+        self.report("ERROR: parent object not found for %s" % (obj.dn))
+        if not self.confirm_all('Move object %s into LostAndFound?'
% (obj.dn), 'move_to_lost_and_found'): + self.report('Not moving object %s into LostAndFound' % (obj.dn)) + return + + keep_transaction = False + self.samdb.transaction_start() + try: + nc_root = self.samdb.get_nc_root(obj.dn) + lost_and_found = self.samdb.get_wellknown_dn(nc_root, dsdb.DS_GUID_LOSTANDFOUND_CONTAINER) + new_dn = ldb.Dn(self.samdb, str(obj.dn)) + new_dn.remove_base_components(len(new_dn) - 1) + if self.do_rename(obj.dn, new_dn, lost_and_found, ["show_deleted:0", "relax:0"], + "Failed to rename object %s into lostAndFound at %s" % (obj.dn, new_dn + lost_and_found)): + self.report("Renamed object %s into lostAndFound at %s" % (obj.dn, new_dn + lost_and_found)) + + m = ldb.Message() + m.dn = obj.dn + m['lastKnownParent'] = ldb.MessageElement(str(obj.dn.parent()), ldb.FLAG_MOD_REPLACE, 'lastKnownParent') + + if self.do_modify(m, [], + "Failed to set lastKnownParent on lostAndFound object at %s" % (new_dn + lost_and_found)): + self.report("Set lastKnownParent on lostAndFound object at %s" % (new_dn + lost_and_found)) + keep_transaction = True + except: + self.samdb.transaction_cancel() + raise + + if keep_transaction: + self.samdb.transaction_commit() + else: + self.samdb.transaction_cancel() + + def err_wrong_dn(self, obj, new_dn, rdn_attr, rdn_val, name_val, controls): + """handle a wrong dn""" + + new_rdn = ldb.Dn(self.samdb, str(new_dn)) + new_rdn.remove_base_components(len(new_rdn) - 1) + new_parent = new_dn.parent() + + attributes = "" + if rdn_val != name_val: + attributes += "%s=%r " % (rdn_attr, rdn_val) + attributes += "name=%r" % (name_val) + + self.report("ERROR: wrong dn[%s] %s new_dn[%s]" % (obj.dn, attributes, new_dn)) + if not self.confirm_all("Rename %s to %s?" % (obj.dn, new_dn), 'fix_dn'): + self.report("Not renaming %s to %s" % (obj.dn, new_dn)) + return + + if self.do_rename(obj.dn, new_rdn, new_parent, controls, + "Failed to rename object %s into %s" % (obj.dn, new_dn)): + self.report("Renamed %s into %s" % (obj.dn, new_dn)) + + def err_wrong_instancetype(self, obj, calculated_instancetype): + """handle a wrong instanceType""" + self.report("ERROR: wrong instanceType %s on %s, should be %d" % (obj["instanceType"], obj.dn, calculated_instancetype)) + if not self.confirm_all('Change instanceType from %s to %d on %s?' % (obj["instanceType"], calculated_instancetype, obj.dn), 'fix_instancetype'): + self.report('Not changing instanceType from %s to %d on %s' % (obj["instanceType"], calculated_instancetype, obj.dn)) + return + + m = ldb.Message() + m.dn = obj.dn + m['value'] = ldb.MessageElement(str(calculated_instancetype), ldb.FLAG_MOD_REPLACE, 'instanceType') + if self.do_modify(m, ["local_oid:%s:0" % dsdb.DSDB_CONTROL_DBCHECK_MODIFY_RO_REPLICA], + "Failed to correct missing instanceType on %s by setting instanceType=%d" % (obj.dn, calculated_instancetype)): + self.report("Corrected instancetype on %s by setting instanceType=%d" % (obj.dn, calculated_instancetype)) + + def err_short_userParameters(self, obj, attrname, value): + # This is a truncated userParameters due to a pre 4.1 replication bug + self.report("ERROR: incorrect userParameters value on object %s. 
If you have another working DC that does not give this warning, please run 'samba-tool drs replicate --full-sync --local %s'" % (obj.dn, self.samdb.get_nc_root(obj.dn)))
+
+    def err_base64_userParameters(self, obj, attrname, value):
+        """handle a userParameters that is wrongly base64 encoded"""
+        self.report("ERROR: wrongly formatted userParameters %s on %s, should not be base64-encoded" % (value, obj.dn))
+        if not self.confirm_all('Convert userParameters from base64 encoding on %s?' % (obj.dn), 'fix_base64_userparameters'):
+            self.report('Not changing userParameters from base64 encoding on %s' % (obj.dn))
+            return
+
+        m = ldb.Message()
+        m.dn = obj.dn
+        m['value'] = ldb.MessageElement(b64decode(obj[attrname][0]), ldb.FLAG_MOD_REPLACE, 'userParameters')
+        if self.do_modify(m, [],
+                          "Failed to correct base64-encoded userParameters on %s by converting from base64" % (obj.dn)):
+            self.report("Corrected base64-encoded userParameters on %s by converting from base64" % (obj.dn))
+
+    def err_utf8_userParameters(self, obj, attrname, value):
+        """handle a userParameters that is wrongly utf-8 encoded"""
+        self.report("ERROR: wrongly formatted userParameters on %s, "
+                    "should not be pseudo-UTF8 encoded" % (obj.dn))
+        if not self.confirm_all('Convert userParameters from UTF8 encoding on %s?' % (obj.dn), 'fix_utf8_userparameters'):
+            self.report('Not changing userParameters from UTF8 encoding on %s' % (obj.dn))
+            return
+
+        m = ldb.Message()
+        m.dn = obj.dn
+        m['value'] = ldb.MessageElement(obj[attrname][0].decode('utf8').encode('utf-16-le'),
+                                        ldb.FLAG_MOD_REPLACE, 'userParameters')
+        if self.do_modify(m, [],
+                          "Failed to correct pseudo-UTF8 encoded userParameters on %s by converting from UTF8" % (obj.dn)):
+            self.report("Corrected pseudo-UTF8 encoded userParameters on %s by converting from UTF8" % (obj.dn))
+
+    def err_doubled_userParameters(self, obj, attrname, value):
+        """handle a userParameters that has been utf-16 encoded twice"""
+        self.report("ERROR: wrongly formatted userParameters on %s, should not be double UTF16 encoded" % (obj.dn))
+        if not self.confirm_all('Convert userParameters from doubled UTF-16 encoding on %s?' % (obj.dn), 'fix_doubled_userparameters'):
+            self.report('Not changing userParameters from doubled UTF-16 encoding on %s' % (obj.dn))
+            return
+
+        m = ldb.Message()
+        m.dn = obj.dn
+        # m['value'] = ldb.MessageElement(obj[attrname][0].decode('utf-16-le').decode('utf-16-le').encode('utf-16-le'),
+        # hmm the above old python2 code doesn't make sense to me and cannot
+        # work in python3 because a string doesn't have a decode method.
+        # However in python2 for some unknown reason this double decode
+        # followed by encode seems to result in what looks like utf8.
+        # In python2 just .decode('utf-16-le').encode('utf-16-le') does nothing
+        # but trigger the 'double UTF16 encoded' condition again :/
+        #
+        # In python2 and python3 value.decode('utf-16-le').encode('utf8') seems
+        # to do the trick and work as expected.
+        m['value'] = ldb.MessageElement(obj[attrname][0].decode('utf-16-le').encode('utf8'),
+                                        ldb.FLAG_MOD_REPLACE, 'userParameters')
+
+        if self.do_modify(m, [],
+                          "Failed to correct doubled-UTF16 encoded userParameters on %s by converting" % (obj.dn)):
+            self.report("Corrected doubled-UTF16 encoded userParameters on %s by converting" % (obj.dn))
+
+    def err_odd_userParameters(self, obj, attrname):
+        """Fix a truncated userParameters due to a pre 4.1 replication bug"""
+        self.report("ERROR: incorrect userParameters value on object %s (odd length). If you have another working DC that does not give this warning, please run 'samba-tool drs replicate --full-sync --local %s'" % (obj.dn, self.samdb.get_nc_root(obj.dn)))
If you have another working DC that does not give this warning, please run 'samba-tool drs replicate --full-sync --local %s'" % (obj.dn, self.samdb.get_nc_root(obj.dn))) + + def find_revealed_link(self, dn, attrname, guid): + """return a revealed link in an object""" + res = self.samdb.search(base=dn, scope=ldb.SCOPE_BASE, attrs=[attrname], + controls=["show_deleted:0", "extended_dn:0", "reveal_internals:0"]) + syntax_oid = self.samdb_schema.get_syntax_oid_from_lDAPDisplayName(attrname) + for val in res[0][attrname]: + dsdb_dn = dsdb_Dn(self.samdb, val.decode('utf8'), syntax_oid) + guid2 = dsdb_dn.dn.get_extended_component("GUID") + if guid == guid2: + return dsdb_dn + return None + + def check_duplicate_links(self, obj, forward_attr, forward_syntax, forward_linkID, backlink_attr): + """check a linked values for duplicate forward links""" + error_count = 0 + + duplicate_dict = dict() + unique_dict = dict() + + # Only forward links can have this problem + if forward_linkID & 1: + # If we got the reverse, skip it + return (error_count, duplicate_dict, unique_dict) + + if backlink_attr is None: + return (error_count, duplicate_dict, unique_dict) + + duplicate_cache_key = "%s:%s" % (str(obj.dn), forward_attr) + if duplicate_cache_key not in self.duplicate_link_cache: + self.duplicate_link_cache[duplicate_cache_key] = False + + for val in obj[forward_attr]: + dsdb_dn = dsdb_Dn(self.samdb, val.decode('utf8'), forward_syntax) + + # all DNs should have a GUID component + guid = dsdb_dn.dn.get_extended_component("GUID") + if guid is None: + continue + guidstr = str(misc.GUID(guid)) + keystr = guidstr + dsdb_dn.prefix + if keystr not in unique_dict: + unique_dict[keystr] = dsdb_dn + continue + error_count += 1 + if keystr not in duplicate_dict: + duplicate_dict[keystr] = dict() + duplicate_dict[keystr]["keep"] = None + duplicate_dict[keystr]["delete"] = list() + + # Now check for the highest RMD_VERSION + v1 = int(unique_dict[keystr].dn.get_extended_component("RMD_VERSION")) + v2 = int(dsdb_dn.dn.get_extended_component("RMD_VERSION")) + if v1 > v2: + duplicate_dict[keystr]["keep"] = unique_dict[keystr] + duplicate_dict[keystr]["delete"].append(dsdb_dn) + continue + if v1 < v2: + duplicate_dict[keystr]["keep"] = dsdb_dn + duplicate_dict[keystr]["delete"].append(unique_dict[keystr]) + unique_dict[keystr] = dsdb_dn + continue + # Fallback to the highest RMD_LOCAL_USN + u1 = int(unique_dict[keystr].dn.get_extended_component("RMD_LOCAL_USN")) + u2 = int(dsdb_dn.dn.get_extended_component("RMD_LOCAL_USN")) + if u1 >= u2: + duplicate_dict[keystr]["keep"] = unique_dict[keystr] + duplicate_dict[keystr]["delete"].append(dsdb_dn) + continue + duplicate_dict[keystr]["keep"] = dsdb_dn + duplicate_dict[keystr]["delete"].append(unique_dict[keystr]) + unique_dict[keystr] = dsdb_dn + + if error_count != 0: + self.duplicate_link_cache[duplicate_cache_key] = True + + return (error_count, duplicate_dict, unique_dict) + + def has_duplicate_links(self, dn, forward_attr, forward_syntax): + """check a linked values for duplicate forward links""" + error_count = 0 + + duplicate_cache_key = "%s:%s" % (str(dn), forward_attr) + if duplicate_cache_key in self.duplicate_link_cache: + return self.duplicate_link_cache[duplicate_cache_key] + + forward_linkID, backlink_attr = self.get_attr_linkID_and_reverse_name(forward_attr) + + attrs = [forward_attr] + controls = ["extended_dn:1:1", "reveal_internals:0"] + + # check its the right GUID + try: + res = self.samdb.search(base=str(dn), scope=ldb.SCOPE_BASE, + attrs=attrs, 
controls=controls) + except ldb.LdbError as e8: + (enum, estr) = e8.args + if enum != ldb.ERR_NO_SUCH_OBJECT: + raise + + return False + + obj = res[0] + error_count, duplicate_dict, unique_dict = \ + self.check_duplicate_links(obj, forward_attr, forward_syntax, forward_linkID, backlink_attr) + + if duplicate_cache_key in self.duplicate_link_cache: + return self.duplicate_link_cache[duplicate_cache_key] + + return False + + def find_missing_forward_links_from_backlinks(self, obj, + forward_attr, + forward_syntax, + backlink_attr, + forward_unique_dict): + """Find all backlinks linking to obj_guid_str not already in forward_unique_dict""" + missing_forward_links = [] + error_count = 0 + + if backlink_attr is None: + return (missing_forward_links, error_count) + + if forward_syntax != ldb.SYNTAX_DN: + self.report("Not checking for missing forward links for syntax: %s" % + forward_syntax) + return (missing_forward_links, error_count) + + if "sortedLinks" in self.compatibleFeatures: + self.report("Not checking for missing forward links because the db " + + "has the sortedLinks feature") + return (missing_forward_links, error_count) + + try: + obj_guid = obj['objectGUID'][0] + obj_guid_str = str(ndr_unpack(misc.GUID, obj_guid)) + filter = "(%s=)" % (backlink_attr, obj_guid_str) + + res = self.samdb.search(expression=filter, + scope=ldb.SCOPE_SUBTREE, attrs=["objectGUID"], + controls=["extended_dn:1:1", + "search_options:1:2", + "paged_results:1:1000"]) + except ldb.LdbError as e9: + (enum, estr) = e9.args + raise + + for r in res: + target_dn = dsdb_Dn(self.samdb, r.dn.extended_str(), forward_syntax) + + guid = target_dn.dn.get_extended_component("GUID") + guidstr = str(misc.GUID(guid)) + if guidstr in forward_unique_dict: + continue + + # A valid forward link looks like this: + # + # ; + # ; + # ; + # ; + # ; + # ; + # ; + # ; + # ; + # CN=unsorted-u8,CN=Users,DC=release-4-5-0-pre1,DC=samba,DC=corp + # + # Note that versions older than Samba 4.8 create + # links with RMD_VERSION=0. + # + # Try to get the local_usn and time from objectClass + # if possible and fallback to any other one. + repl = ndr_unpack(drsblobs.replPropertyMetaDataBlob, + obj['replPropertyMetadata'][0]) + for o in repl.ctr.array: + local_usn = o.local_usn + t = o.originating_change_time + if o.attid == drsuapi.DRSUAPI_ATTID_objectClass: + break + + # We use a magic invocationID for restoring missing + # forward links to recover from bug #13228. + # This should allow some more future magic to fix the + # problem. + # + # It also means it looses the conflict resolution + # against almost every real invocation, if the + # version is also 0. 
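The recovery just sketched rebuilds each missing forward link as a DSDB "extended DN": a run of <KEY=VALUE> components (the target GUID, the RMD_* replication metadata, optionally a SID) in front of the plain DN. A minimal stand-alone illustration of that string form and how it decomposes, in plain Python with invented sample values (no Samba libraries; real extended DNs may additionally escape ';' inside DN values):

example_link = ("<GUID=fbee08fd-6f75-4bd4-af3f-e4f063a6379e>;"
                "<RMD_FLAGS=0>;<RMD_LOCAL_USN=3765>;<RMD_VERSION=1>;"
                "CN=unsorted-u8,CN=Users,DC=samba,DC=corp")

def split_extended_dn(value):
    """Split '<KEY=VAL>;...;plain DN' into (components dict, plain DN)."""
    components = {}
    parts = value.split(";")
    for i, part in enumerate(parts):
        if part.startswith("<") and part.endswith(">"):
            key, _, val = part[1:-1].partition("=")
            components[key] = val
        else:
            # first non-<...> part starts the plain DN
            return components, ";".join(parts[i:])
    return components, ""

components, plain_dn = split_extended_dn(example_link)
assert components["RMD_VERSION"] == "1"
assert plain_dn == "CN=unsorted-u8,CN=Users,DC=samba,DC=corp"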
+ originating_invocid = misc.GUID("ffffffff-4700-4700-4700-000000b13228") + originating_usn = 1 + + rmd_addtime = t + rmd_changetime = t + rmd_flags = 0 + rmd_invocid = originating_invocid + rmd_originating_usn = originating_usn + rmd_local_usn = local_usn + rmd_version = 0 + + target_dn.dn.set_extended_component("RMD_ADDTIME", str(rmd_addtime)) + target_dn.dn.set_extended_component("RMD_CHANGETIME", str(rmd_changetime)) + target_dn.dn.set_extended_component("RMD_FLAGS", str(rmd_flags)) + target_dn.dn.set_extended_component("RMD_INVOCID", ndr_pack(rmd_invocid)) + target_dn.dn.set_extended_component("RMD_ORIGINATING_USN", str(rmd_originating_usn)) + target_dn.dn.set_extended_component("RMD_LOCAL_USN", str(rmd_local_usn)) + target_dn.dn.set_extended_component("RMD_VERSION", str(rmd_version)) + + error_count += 1 + missing_forward_links.append(target_dn) + + return (missing_forward_links, error_count) + + def check_dn(self, obj, attrname, syntax_oid): + """check a DN attribute for correctness""" + error_count = 0 + obj_guid = obj['objectGUID'][0] + + linkID, reverse_link_name = self.get_attr_linkID_and_reverse_name(attrname) + if reverse_link_name is not None: + reverse_syntax_oid = self.samdb_schema.get_syntax_oid_from_lDAPDisplayName(reverse_link_name) + else: + reverse_syntax_oid = None + + is_member_link = attrname in ("member", "memberOf") + if is_member_link and self.quick_membership_checks: + duplicate_dict = {} + else: + error_count, duplicate_dict, unique_dict = \ + self.check_duplicate_links(obj, attrname, syntax_oid, + linkID, reverse_link_name) + + if len(duplicate_dict) != 0: + + missing_forward_links, missing_error_count = \ + self.find_missing_forward_links_from_backlinks(obj, + attrname, syntax_oid, + reverse_link_name, + unique_dict) + error_count += missing_error_count + + forward_links = [dn for dn in unique_dict.values()] + + if missing_error_count != 0: + self.report("ERROR: Missing and duplicate forward link values for attribute '%s' in '%s'" % ( + attrname, obj.dn)) + else: + self.report("ERROR: Duplicate forward link values for attribute '%s' in '%s'" % (attrname, obj.dn)) + for m in missing_forward_links: + self.report("Missing link '%s'" % (m)) + if not self.confirm_all("Schedule re-adding missing forward link for attribute %s" % attrname, + 'fix_all_missing_forward_links'): + self.err_orphaned_backlink(m.dn, reverse_link_name, + obj.dn.extended_str(), obj.dn, + attrname, syntax_oid, + check_duplicates=False) + continue + forward_links += [m] + for keystr in duplicate_dict.keys(): + d = duplicate_dict[keystr] + for dd in d["delete"]: + self.report("Duplicate link '%s'" % dd) + self.report("Correct link '%s'" % d["keep"]) + + # We now construct the sorted dn values. 
+ # They're sorted by the objectGUID of the target + # See dsdb_Dn.__cmp__() + vals = [str(dn) for dn in sorted(forward_links)] + self.err_recover_forward_links(obj, attrname, vals) + # We should continue with the fixed values + obj[attrname] = ldb.MessageElement(vals, 0, attrname) + + for val in obj[attrname]: + dsdb_dn = dsdb_Dn(self.samdb, val.decode('utf8'), syntax_oid) + + # all DNs should have a GUID component + guid = dsdb_dn.dn.get_extended_component("GUID") + if guid is None: + error_count += 1 + self.err_missing_dn_GUID_component(obj.dn, attrname, val, dsdb_dn, + "missing GUID") + continue + + guidstr = str(misc.GUID(guid)) + attrs = ['isDeleted', 'replPropertyMetaData'] + + if (str(attrname).lower() == 'msds-hasinstantiatedncs') and (obj.dn == self.ntds_dsa): + fixing_msDS_HasInstantiatedNCs = True + attrs.append("instanceType") + else: + fixing_msDS_HasInstantiatedNCs = False + + if reverse_link_name is not None: + attrs.append(reverse_link_name) + + # check it's the right GUID + try: + res = self.samdb.search(base="<GUID=%s>" % guidstr, scope=ldb.SCOPE_BASE, + attrs=attrs, controls=["extended_dn:1:1", "show_recycled:1", + "reveal_internals:0" + ]) + except ldb.LdbError as e3: + (enum, estr) = e3.args + if enum != ldb.ERR_NO_SUCH_OBJECT: + raise + + # We don't always want to fix this, so let the handler decide and do the reporting + error_count += self.err_missing_target_dn_or_GUID(obj.dn, + attrname, + val, + dsdb_dn) + continue + + if fixing_msDS_HasInstantiatedNCs: + dsdb_dn.prefix = "B:8:%08X:" % int(res[0]['instanceType'][0]) + dsdb_dn.binary = "%08X" % int(res[0]['instanceType'][0]) + + if str(dsdb_dn) != str(val): + error_count += 1 + self.err_incorrect_binary_dn(obj.dn, attrname, val, dsdb_dn, "incorrect instanceType part of Binary DN") + continue + + # now we have two cases - the source object might or might not be deleted + is_deleted = 'isDeleted' in obj and str(obj['isDeleted'][0]).upper() == 'TRUE' + target_is_deleted = 'isDeleted' in res[0] and str(res[0]['isDeleted'][0]).upper() == 'TRUE' + + if is_deleted and obj.dn not in self.deleted_objects_containers and linkID: + # A fully deleted object should not have any linked + # attributes. 
(MS-ADTS 3.1.1.5.5.1.1 Tombstone + # Requirements and 3.1.1.5.5.1.3 Recycled-Object + # Requirements) + self.err_undead_linked_attribute(obj, attrname, val) + error_count += 1 + continue + elif target_is_deleted and not self.is_deleted_objects_dn(dsdb_dn) and linkID: + # the target DN is not allowed to be deleted, unless the target DN is the + # special Deleted Objects container + error_count += 1 + local_usn = dsdb_dn.dn.get_extended_component("RMD_LOCAL_USN") + if local_usn: + if 'replPropertyMetaData' in res[0]: + repl = ndr_unpack(drsblobs.replPropertyMetaDataBlob, + res[0]['replPropertyMetadata'][0]) + found_data = False + for o in repl.ctr.array: + if o.attid == drsuapi.DRSUAPI_ATTID_isDeleted: + deleted_usn = o.local_usn + if deleted_usn >= int(local_usn): + # If the object was deleted after the link + # was last modified then clean it up here + found_data = True + break + + if found_data: + self.err_deleted_dn(obj.dn, attrname, + val, dsdb_dn, res[0].dn, True) + continue + + self.err_deleted_dn(obj.dn, attrname, val, dsdb_dn, res[0].dn, False) + continue + + # We should not check for incorrect + # components on deleted links, as these are allowed to + # go stale (we just need the GUID, not the name) + rmd_blob = dsdb_dn.dn.get_extended_component("RMD_FLAGS") + rmd_flags = 0 + if rmd_blob is not None: + rmd_flags = int(rmd_blob) + + # assert the DN matches in string form, where a reverse + # link exists, otherwise (below) offer to fix it as a non-error. + # The string form is essentially only kept for forensics, + # as we always re-resolve by GUID in normal operations. + if not rmd_flags & 1 and reverse_link_name is not None: + if str(res[0].dn) != str(dsdb_dn.dn): + error_count += 1 + self.err_dn_component_target_mismatch(obj.dn, attrname, val, dsdb_dn, + res[0].dn, "string") + continue + + if res[0].dn.get_extended_component("GUID") != dsdb_dn.dn.get_extended_component("GUID"): + error_count += 1 + self.err_dn_component_target_mismatch(obj.dn, attrname, val, dsdb_dn, + res[0].dn, "GUID") + continue + + target_sid = res[0].dn.get_extended_component("SID") + link_sid = dsdb_dn.dn.get_extended_component("SID") + if link_sid is None and target_sid is not None: + error_count += 1 + self.err_dn_component_missing_target_sid(obj.dn, attrname, val, + dsdb_dn, target_sid) + continue + if link_sid != target_sid: + error_count += 1 + self.err_dn_component_target_mismatch(obj.dn, attrname, val, dsdb_dn, + res[0].dn, "SID") + continue + + # Only for non-links, not even forward-only links + # (otherwise this breaks repl_meta_data): + # + # Now we have checked the GUID and SID, offer to fix old + # DN strings as a non-error (DNs, not links so no + # backlink). Samba does not maintain this string + # otherwise, so we don't increment error_count. 
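The RMD_FLAGS component tested above carries the link state; bit 0x1 marks the link as inactive (deleted), in which case a stale string form is tolerated and only the GUID matters. A stand-alone sketch of that flag test, assuming a hypothetical constant name for the bit (plain Python, invented values):

DSDB_RMD_FLAG_DELETED = 0x1  # assumed name for the "inactive link" bit

def link_is_active(rmd_flags_component):
    # rmd_flags_component is the RMD_FLAGS extended component, or None
    # when the component is absent (treated as active).
    rmd_flags = int(rmd_flags_component) if rmd_flags_component is not None else 0
    return not (rmd_flags & DSDB_RMD_FLAG_DELETED)

assert link_is_active(None)      # no RMD_FLAGS component: active
assert link_is_active(b"0")      # explicit zero: active
assert not link_is_active(b"1")  # deleted link: stale name is acceptable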
+ if reverse_link_name is None: + if linkID == 0 and str(res[0].dn) != str(dsdb_dn.dn): + # Pass in the old/bad DN without the <GUID=...> part, + # otherwise the LDB code will correct it on the way through + # (Note: we still want to preserve the DSDB DN prefix in the + # case of binary DNs) + bad_dn = dsdb_dn.prefix + dsdb_dn.dn.get_linearized() + self.err_dn_string_component_old(obj.dn, attrname, bad_dn, + dsdb_dn, res[0].dn) + continue + + if is_member_link and self.quick_membership_checks: + continue + + # check the reverse_link is correct if there should be one + match_count = 0 + if reverse_link_name in res[0]: + for v in res[0][reverse_link_name]: + v_dn = dsdb_Dn(self.samdb, v.decode('utf8')) + v_guid = v_dn.dn.get_extended_component("GUID") + v_blob = v_dn.dn.get_extended_component("RMD_FLAGS") + v_rmd_flags = 0 + if v_blob is not None: + v_rmd_flags = int(v_blob) + if v_rmd_flags & 1: + continue + if v_guid == obj_guid: + match_count += 1 + + if match_count != 1: + if syntax_oid == dsdb.DSDB_SYNTAX_BINARY_DN or reverse_syntax_oid == dsdb.DSDB_SYNTAX_BINARY_DN: + if not linkID & 1: + # Forward binary multi-valued linked attribute + forward_count = 0 + for w in obj[attrname]: + w_guid = dsdb_Dn(self.samdb, w.decode('utf8')).dn.get_extended_component("GUID") + if w_guid == guid: + forward_count += 1 + + if match_count == forward_count: + continue + expected_count = 0 + for v in obj[attrname]: + v_dn = dsdb_Dn(self.samdb, v.decode('utf8')) + v_guid = v_dn.dn.get_extended_component("GUID") + v_blob = v_dn.dn.get_extended_component("RMD_FLAGS") + v_rmd_flags = 0 + if v_blob is not None: + v_rmd_flags = int(v_blob) + if v_rmd_flags & 1: + continue + if v_guid == guid: + expected_count += 1 + + if match_count == expected_count: + continue + + diff_count = expected_count - match_count + + if linkID & 1: + # If there's a backward link on binary multi-valued linked attribute, + # let the check on the forward link remedy the value. + # UNLESS there is no forward link detected. + if match_count == 0: + error_count += 1 + self.err_orphaned_backlink(obj.dn, attrname, + val, dsdb_dn.dn, + reverse_link_name, + reverse_syntax_oid) + continue + # Only warn here and let the forward link logic fix it. + self.report("WARNING: Link (back) mismatch for '%s' (%d) on '%s' to '%s' (%d) on '%s'" % ( + attrname, expected_count, str(obj.dn), + reverse_link_name, match_count, str(dsdb_dn.dn))) + continue + + assert not target_is_deleted + + self.report("ERROR: Link (forward) mismatch for '%s' (%d) on '%s' to '%s' (%d) on '%s'" % ( + attrname, expected_count, str(obj.dn), + reverse_link_name, match_count, str(dsdb_dn.dn))) + + # Loop until the difference between the forward and + # the backward links is resolved. + while diff_count != 0: + error_count += 1 + if diff_count > 0: + if match_count > 0 or diff_count > 1: + # TODO no method to fix these right now + self.report("ERROR: Can't fix missing " + "multi-valued backlinks on %s" % str(dsdb_dn.dn)) + break + self.err_missing_backlink(obj, attrname, + obj.dn.extended_str(), + reverse_link_name, + dsdb_dn.dn) + diff_count -= 1 + else: + self.err_orphaned_backlink(res[0].dn, reverse_link_name, + obj.dn.extended_str(), obj.dn, + attrname, syntax_oid) + diff_count += 1 + + return error_count + + def find_repl_attid(self, repl, attid): + for o in repl.ctr.array: + if o.attid == attid: + return o + + return None + + def get_originating_time(self, val, attid): + """Read metadata properties and return the originating time for + a given attributeId. 
+ + :return: the originating time or 0 if not found + """ + + repl = ndr_unpack(drsblobs.replPropertyMetaDataBlob, val) + o = self.find_repl_attid(repl, attid) + if o is not None: + return o.originating_change_time + return 0 + + def process_metadata(self, dn, val): + """Read metadata properties and list attributes in it. + raises KeyError if the attid is unknown.""" + + set_att = set() + wrong_attids = set() + list_attid = [] + in_schema_nc = dn.is_child_of(self.schema_dn) + + repl = ndr_unpack(drsblobs.replPropertyMetaDataBlob, val) + + for o in repl.ctr.array: + att = self.samdb_schema.get_lDAPDisplayName_by_attid(o.attid) + set_att.add(att.lower()) + list_attid.append(o.attid) + correct_attid = self.samdb_schema.get_attid_from_lDAPDisplayName(att, + is_schema_nc=in_schema_nc) + if correct_attid != o.attid: + wrong_attids.add(o.attid) + + return (set_att, list_attid, wrong_attids) + + def fix_metadata(self, obj, attr): + """re-write replPropertyMetaData elements for a single attribute for an + object. This is used to fix missing replPropertyMetaData elements""" + guid_str = str(ndr_unpack(misc.GUID, obj['objectGUID'][0])) + dn = ldb.Dn(self.samdb, "<GUID=%s>" % guid_str) + res = self.samdb.search(base=dn, scope=ldb.SCOPE_BASE, attrs=[attr], + controls=["search_options:1:2", + "show_recycled:1"]) + msg = res[0] + nmsg = ldb.Message() + nmsg.dn = dn + nmsg[attr] = ldb.MessageElement(msg[attr], ldb.FLAG_MOD_REPLACE, attr) + if self.do_modify(nmsg, ["relax:0", "provision:0", "show_recycled:1"], + "Failed to fix metadata for attribute %s" % attr): + self.report("Fixed metadata for attribute %s" % attr) + + def ace_get_effective_inherited_type(self, ace): + if ace.flags & security.SEC_ACE_FLAG_INHERIT_ONLY: + return None + + check = False + if ace.type == security.SEC_ACE_TYPE_ACCESS_ALLOWED_OBJECT: + check = True + elif ace.type == security.SEC_ACE_TYPE_ACCESS_DENIED_OBJECT: + check = True + elif ace.type == security.SEC_ACE_TYPE_SYSTEM_AUDIT_OBJECT: + check = True + elif ace.type == security.SEC_ACE_TYPE_SYSTEM_ALARM_OBJECT: + check = True + + if not check: + return None + + if not ace.object.flags & security.SEC_ACE_INHERITED_OBJECT_TYPE_PRESENT: + return None + + return str(ace.object.inherited_type) + + def lookup_class_schemaIDGUID(self, cls): + if cls in self.class_schemaIDGUID: + return self.class_schemaIDGUID[cls] + + flt = "(&(ldapDisplayName=%s)(objectClass=classSchema))" % cls + res = self.samdb.search(base=self.schema_dn, + expression=flt, + attrs=["schemaIDGUID"]) + t = str(ndr_unpack(misc.GUID, res[0]["schemaIDGUID"][0])) + + self.class_schemaIDGUID[cls] = t + return t + + def process_sd(self, dn, obj): + sd_attr = "nTSecurityDescriptor" + sd_val = obj[sd_attr] + + sd = ndr_unpack(security.descriptor, sd_val[0]) + + is_deleted = 'isDeleted' in obj and str(obj['isDeleted'][0]).upper() == 'TRUE' + if is_deleted: + # we don't fix deleted objects + return (sd, None) + + sd_clean = security.descriptor() + sd_clean.owner_sid = sd.owner_sid + sd_clean.group_sid = sd.group_sid + sd_clean.type = sd.type + sd_clean.revision = sd.revision + + broken = False + last_inherited_type = None + + aces = [] + if sd.sacl is not None: + aces = sd.sacl.aces + for i in range(0, len(aces)): + ace = aces[i] + + if not ace.flags & security.SEC_ACE_FLAG_INHERITED_ACE: + sd_clean.sacl_add(ace) + continue + + t = self.ace_get_effective_inherited_type(ace) + if t is None: + continue + + if last_inherited_type is not None: + if t != last_inherited_type: + # if it inherited from more than + # one type it's very likely 
to be broken + # + # If not the recalculation will calculate + # the same result. + broken = True + continue + + last_inherited_type = t + + aces = [] + if sd.dacl is not None: + aces = sd.dacl.aces + for i in range(0, len(aces)): + ace = aces[i] + + if not ace.flags & security.SEC_ACE_FLAG_INHERITED_ACE: + sd_clean.dacl_add(ace) + continue + + t = self.ace_get_effective_inherited_type(ace) + if t is None: + continue + + if last_inherited_type is not None: + if t != last_inherited_type: + # if it inherited from more than + # one type it's very likely to be broken + # + # If not the recalculation will calculate + # the same result. + broken = True + continue + + last_inherited_type = t + + if broken: + return (sd_clean, sd) + + if last_inherited_type is None: + # ok + return (sd, None) + + cls = None + try: + cls = obj["objectClass"][-1] + except KeyError as e: + pass + + if cls is None: + res = self.samdb.search(base=dn, scope=ldb.SCOPE_BASE, + attrs=["isDeleted", "objectClass"], + controls=["show_recycled:1"]) + o = res[0] + is_deleted = 'isDeleted' in o and str(o['isDeleted'][0]).upper() == 'TRUE' + if is_deleted: + # we don't fix deleted objects + return (sd, None) + cls = o["objectClass"][-1] + + t = self.lookup_class_schemaIDGUID(cls) + + if t != last_inherited_type: + # broken + return (sd_clean, sd) + + # ok + return (sd, None) + + def err_wrong_sd(self, dn, sd, sd_broken): + """re-write the SD due to incorrect inherited ACEs""" + sd_attr = "nTSecurityDescriptor" + sd_val = ndr_pack(sd) + sd_flags = security.SECINFO_DACL | security.SECINFO_SACL + + if not self.confirm_all('Fix %s on %s?' % (sd_attr, dn), 'fix_ntsecuritydescriptor'): + self.report('Not fixing %s on %s\n' % (sd_attr, dn)) + return + + nmsg = ldb.Message() + nmsg.dn = dn + nmsg[sd_attr] = ldb.MessageElement(sd_val, ldb.FLAG_MOD_REPLACE, sd_attr) + if self.do_modify(nmsg, ["sd_flags:1:%d" % sd_flags], + "Failed to fix attribute %s" % sd_attr): + self.report("Fixed attribute '%s' of '%s'\n" % (sd_attr, dn)) + + def err_wrong_default_sd(self, dn, sd, diff): + """re-write the SD due to not matching the default (optional mode for fixing an incorrect provision)""" + sd_attr = "nTSecurityDescriptor" + sd_val = ndr_pack(sd) + sd_flags = security.SECINFO_DACL | security.SECINFO_SACL + if sd.owner_sid is not None: + sd_flags |= security.SECINFO_OWNER + if sd.group_sid is not None: + sd_flags |= security.SECINFO_GROUP + + if not self.confirm_all('Reset %s on %s back to provision default?\n%s' % (sd_attr, dn, diff), 'reset_all_well_known_acls'): + self.report('Not resetting %s on %s\n' % (sd_attr, dn)) + return + + m = ldb.Message() + m.dn = dn + m[sd_attr] = ldb.MessageElement(sd_val, ldb.FLAG_MOD_REPLACE, sd_attr) + if self.do_modify(m, ["sd_flags:1:%d" % sd_flags], + "Failed to reset attribute %s" % sd_attr): + self.report("Fixed attribute '%s' of '%s'\n" % (sd_attr, dn)) + + def err_missing_sd_owner(self, dn, sd): + """re-write the SD due to a missing owner or group""" + sd_attr = "nTSecurityDescriptor" + sd_val = ndr_pack(sd) + sd_flags = security.SECINFO_OWNER | security.SECINFO_GROUP + + if not self.confirm_all('Fix missing owner or group in %s on %s?' 
% (sd_attr, dn), 'fix_ntsecuritydescriptor_owner_group'): + self.report('Not fixing missing owner or group %s on %s\n' % (sd_attr, dn)) + return + + nmsg = ldb.Message() + nmsg.dn = dn + nmsg[sd_attr] = ldb.MessageElement(sd_val, ldb.FLAG_MOD_REPLACE, sd_attr) + + # By setting the session_info to admin_session_info and + # setting the security.SECINFO_OWNER | security.SECINFO_GROUP + # flags we cause the descriptor module to set the correct + # owner and group on the SD, replacing the None/NULL values + # for owner_sid and group_sid currently present. + # + # The admin_session_info matches that used in provision, and + # is the best guess we can make for an existing object that + # hasn't had something specifically set. + # + # This is important for the dns related naming contexts. + self.samdb.set_session_info(self.admin_session_info) + if self.do_modify(nmsg, ["sd_flags:1:%d" % sd_flags], + "Failed to fix metadata for attribute %s" % sd_attr): + self.report("Fixed attribute '%s' of '%s'\n" % (sd_attr, dn)) + self.samdb.set_session_info(self.system_session_info) + + def is_expired_tombstone(self, dn, repl_val): + if self.check_expired_tombstones: + # This is not the default, it's just + # used to keep dbcheck tests working with + # old static provision dumps + return False + + if dn in self.deleted_objects_containers: + # The Deleted Objects container will look like an expired + # tombstone + return False + + repl = ndr_unpack(drsblobs.replPropertyMetaDataBlob, repl_val) + + isDeleted = self.find_repl_attid(repl, drsuapi.DRSUAPI_ATTID_isDeleted) + + delete_time = samba.nttime2unix(isDeleted.originating_change_time) + current_time = time.time() + + tombstone_delta = self.tombstoneLifetime * (24 * 60 * 60) + + delta = current_time - delete_time + if delta <= tombstone_delta: + return False + + expunge_time = delete_time + tombstone_delta + + delta_days = delta / (24 * 60 * 60) + + if delta_days <= 2: + self.report("SKIPPING additional checks on object " + "%s which very recently " + "became an expired tombstone (normal)" % dn) + self.report("INFO: it is expected this will be expunged " + "by the next daily task some time after %s, " + "%d hours ago" + % (time.ctime(expunge_time), delta // (60 * 60))) + else: + self.report("SKIPPING: object %s is an expired tombstone" % dn) + self.report("INFO: it was expected this object would have " + "been expunged soon after " + "%s, %d days ago" + % (time.ctime(expunge_time), delta_days)) + + self.report("isDeleted: attid=0x%08x version=%d invocation=%s usn=%s (local=%s) at %s" % ( + isDeleted.attid, + isDeleted.version, + isDeleted.originating_invocation_id, + isDeleted.originating_usn, + isDeleted.local_usn, + time.ctime(samba.nttime2unix(isDeleted.originating_change_time)))) + self.expired_tombstones += 1 + return True + + def find_changes_after_deletion(self, repl_val): + repl = ndr_unpack(drsblobs.replPropertyMetaDataBlob, repl_val) + + isDeleted = self.find_repl_attid(repl, drsuapi.DRSUAPI_ATTID_isDeleted) + + delete_time = samba.nttime2unix(isDeleted.originating_change_time) + + tombstone_delta = self.tombstoneLifetime * (24 * 60 * 60) + + found = [] + for o in repl.ctr.array: + if o.attid == drsuapi.DRSUAPI_ATTID_isDeleted: + continue + + if o.local_usn <= isDeleted.local_usn: + continue + + if o.originating_change_time <= isDeleted.originating_change_time: + continue + + change_time = samba.nttime2unix(o.originating_change_time) + + delta = change_time - delete_time + if delta <= tombstone_delta: + continue + + # If the modification happened after 
the tombstone lifetime + # has passed, we have a bug as the object might be deleted + # already on other DCs and won't be able to replicate + # back + found.append(o) + + return found, isDeleted + + def has_changes_after_deletion(self, dn, repl_val): + found, isDeleted = self.find_changes_after_deletion(repl_val) + if len(found) == 0: + return False + + def report_attid(o): + try: + attname = self.samdb_schema.get_lDAPDisplayName_by_attid(o.attid) + except KeyError: + attname = "<unknown:0x%08x>" % o.attid + + self.report("%s: attid=0x%08x version=%d invocation=%s usn=%s (local=%s) at %s" % ( + attname, o.attid, o.version, + o.originating_invocation_id, + o.originating_usn, + o.local_usn, + time.ctime(samba.nttime2unix(o.originating_change_time)))) + + self.report("ERROR: object %s has changes after deletion" % dn) + report_attid(isDeleted) + for o in found: + report_attid(o) + + return True + + def err_changes_after_deletion(self, dn, repl_val): + found, isDeleted = self.find_changes_after_deletion(repl_val) + + in_schema_nc = dn.is_child_of(self.schema_dn) + rdn_attr = dn.get_rdn_name() + rdn_attid = self.samdb_schema.get_attid_from_lDAPDisplayName(rdn_attr, + is_schema_nc=in_schema_nc) + + unexpected = [] + for o in found: + if o.attid == rdn_attid: + continue + if o.attid == drsuapi.DRSUAPI_ATTID_name: + continue + if o.attid == drsuapi.DRSUAPI_ATTID_lastKnownParent: + continue + try: + attname = self.samdb_schema.get_lDAPDisplayName_by_attid(o.attid) + except KeyError: + attname = "<unknown:0x%08x>" % o.attid + unexpected.append(attname) + + if len(unexpected) > 0: + self.report('Unexpected attributes: %s' % ",".join(unexpected)) + self.report('Not fixing changes after deletion bug') + return + + if not self.confirm_all('Delete broken tombstone object %s deleted %s days ago?' % ( + dn, self.tombstoneLifetime), 'fix_changes_after_deletion_bug'): + self.report('Not fixing changes after deletion bug') + return + + if self.do_delete(dn, ["relax:0"], + "Failed to remove DN %s" % dn): + self.report("Removed DN %s" % dn) + + def has_replmetadata_zero_invocationid(self, dn, repl_meta_data): + repl = ndr_unpack(drsblobs.replPropertyMetaDataBlob, + repl_meta_data) + ctr = repl.ctr + found = False + for o in ctr.array: + # Search for a zero invocationID + if o.originating_invocation_id != misc.GUID("00000000-0000-0000-0000-000000000000"): + continue + + found = True + self.report("""ERROR: on replPropertyMetaData of %s, the originating invocationID of attribute 0x%08x, + version %d changed at %s is 00000000-0000-0000-0000-000000000000, + but should be non-zero. 
Proposed fix is to set to our invocationID (%s).""" + % (dn, o.attid, o.version, + time.ctime(samba.nttime2unix(o.originating_change_time)), + self.samdb.get_invocation_id())) + + return found + + def err_replmetadata_zero_invocationid(self, dn, attr, repl_meta_data): + repl = ndr_unpack(drsblobs.replPropertyMetaDataBlob, + repl_meta_data) + ctr = repl.ctr + now = samba.unix2nttime(int(time.time())) + found = False + for o in ctr.array: + # Search for a zero invocationID + if o.originating_invocation_id != misc.GUID("00000000-0000-0000-0000-000000000000"): + continue + + found = True + seq = self.samdb.sequence_number(ldb.SEQ_NEXT) + o.version = o.version + 1 + o.originating_change_time = now + o.originating_invocation_id = misc.GUID(self.samdb.get_invocation_id()) + o.originating_usn = seq + o.local_usn = seq + + if found: + replBlob = ndr_pack(repl) + msg = ldb.Message() + msg.dn = dn + + if not self.confirm_all('Fix %s on %s by setting originating_invocation_id on some elements to our invocationID %s?' + % (attr, dn, self.samdb.get_invocation_id()), 'fix_replmetadata_zero_invocationid'): + self.report('Not fixing zero originating_invocation_id in %s on %s\n' % (attr, dn)) + return + + nmsg = ldb.Message() + nmsg.dn = dn + nmsg[attr] = ldb.MessageElement(replBlob, ldb.FLAG_MOD_REPLACE, attr) + if self.do_modify(nmsg, ["local_oid:%s:0" % dsdb.DSDB_CONTROL_DBCHECK_MODIFY_RO_REPLICA, + "local_oid:1.3.6.1.4.1.7165.4.3.14:0"], + "Failed to fix attribute %s" % attr): + self.report("Fixed attribute '%s' of '%s'\n" % (attr, dn)) + + def err_replmetadata_unknown_attid(self, dn, attr, repl_meta_data): + repl = ndr_unpack(drsblobs.replPropertyMetaDataBlob, + repl_meta_data) + ctr = repl.ctr + for o in ctr.array: + # Search for an invalid attid + try: + att = self.samdb_schema.get_lDAPDisplayName_by_attid(o.attid) + except KeyError: + self.report('ERROR: attributeID 0x%08x is not known in our schema, not fixing %s on %s\n' % (o.attid, attr, dn)) + return + + def err_replmetadata_incorrect_attid(self, dn, attr, repl_meta_data, wrong_attids): + repl = ndr_unpack(drsblobs.replPropertyMetaDataBlob, + repl_meta_data) + fix = False + + set_att = set() + remove_attid = set() + hash_att = {} + + in_schema_nc = dn.is_child_of(self.schema_dn) + + ctr = repl.ctr + # Sort the array. This strange construction, creating a new + # list, works around bugs in samba's array handling in IDL + # generated objects. + ctr.array = sorted(ctr.array[:], key=lambda o: o.attid) + # Now walk it in reverse, so we see the low (and so incorrect, + # the correct values are above 0x80000000) values first and + # remove the 'second' value we see. + for o in reversed(ctr.array): + print("%s: 0x%08x" % (dn, o.attid)) + att = self.samdb_schema.get_lDAPDisplayName_by_attid(o.attid) + if att.lower() in set_att: + self.report('ERROR: duplicate attributeID values for %s in %s on %s\n' % (att, attr, dn)) + if not self.confirm_all('Fix %s on %s by removing the duplicate value 0x%08x for %s (keeping 0x%08x)?' 
+ % (attr, dn, o.attid, att, hash_att[att].attid), + 'fix_replmetadata_duplicate_attid'): + self.report('Not fixing duplicate value 0x%08x for %s in %s on %s\n' + % (o.attid, att, attr, dn)) + return + fix = True + remove_attid.add(o.attid) + # We want to set the metadata for the most recent + # update to have been applied locally, that is the metadata + # matching the (eg string) value in the attribute + if o.local_usn > hash_att[att].local_usn: + # This is always what we would have sent over DRS, + # because the DRS server will have sent the + # msDS-IntID, but with the values from both + # attribute entries. + hash_att[att].version = o.version + hash_att[att].originating_change_time = o.originating_change_time + hash_att[att].originating_invocation_id = o.originating_invocation_id + hash_att[att].originating_usn = o.originating_usn + hash_att[att].local_usn = o.local_usn + + # Do not re-add the value to the set or overwrite the hash value + continue + + hash_att[att] = o + set_att.add(att.lower()) + + # Generate a real list we can sort on properly + new_list = [o for o in ctr.array if o.attid not in remove_attid] + + if (len(wrong_attids) > 0): + for o in new_list: + if o.attid in wrong_attids: + att = self.samdb_schema.get_lDAPDisplayName_by_attid(o.attid) + correct_attid = self.samdb_schema.get_attid_from_lDAPDisplayName(att, is_schema_nc=in_schema_nc) + self.report('ERROR: incorrect attributeID values in %s on %s\n' % (attr, dn)) + if not self.confirm_all('Fix %s on %s by replacing incorrect value 0x%08x for %s (new 0x%08x)?' + % (attr, dn, o.attid, att, correct_attid), 'fix_replmetadata_wrong_attid'): + self.report('Not fixing incorrect value 0x%08x with 0x%08x for %s in %s on %s\n' + % (o.attid, correct_attid, att, attr, dn)) + return + fix = True + o.attid = correct_attid + if fix: + # Sort the array (we changed the values so must re-sort) + new_list[:] = sorted(new_list[:], key=lambda o: o.attid) + + # If we did not already need to fix it, then ask about sorting + if not fix: + self.report('ERROR: unsorted attributeID values in %s on %s\n' % (attr, dn)) + if not self.confirm_all('Fix %s on %s by sorting the attribute list?' 
+ % (attr, dn), 'fix_replmetadata_unsorted_attid'): + self.report('Not fixing %s on %s\n' % (attr, dn)) + return + + # The actual sort was already done at the top of the function + + ctr.count = len(new_list) + ctr.array = new_list + replBlob = ndr_pack(repl) + + nmsg = ldb.Message() + nmsg.dn = dn + nmsg[attr] = ldb.MessageElement(replBlob, ldb.FLAG_MOD_REPLACE, attr) + if self.do_modify(nmsg, ["local_oid:%s:0" % dsdb.DSDB_CONTROL_DBCHECK_MODIFY_RO_REPLICA, + "local_oid:1.3.6.1.4.1.7165.4.3.14:0", + "local_oid:1.3.6.1.4.1.7165.4.3.25:0"], + "Failed to fix attribute %s" % attr): + self.report("Fixed attribute '%s' of '%s'\n" % (attr, dn)) + + def is_deleted_deleted_objects(self, obj): + faulty = False + if "description" not in obj: + self.report("ERROR: description not present on Deleted Objects container %s" % obj.dn) + faulty = True + if "showInAdvancedViewOnly" not in obj or str(obj['showInAdvancedViewOnly'][0]).upper() == 'FALSE': + self.report("ERROR: showInAdvancedViewOnly not present or FALSE on Deleted Objects container %s" % obj.dn) + faulty = True + if "objectCategory" not in obj: + self.report("ERROR: objectCategory not present on Deleted Objects container %s" % obj.dn) + faulty = True + if "isCriticalSystemObject" not in obj or str(obj['isCriticalSystemObject'][0]).upper() == 'FALSE': + self.report("ERROR: isCriticalSystemObject not present or FALSE on Deleted Objects container %s" % obj.dn) + faulty = True + if "isRecycled" in obj: + self.report("ERROR: isRecycled present on Deleted Objects container %s" % obj.dn) + faulty = True + if "isDeleted" in obj and str(obj['isDeleted'][0]).upper() == 'FALSE': + self.report("ERROR: isDeleted not set to TRUE on Deleted Objects container %s" % obj.dn) + faulty = True + if "objectClass" not in obj or (len(obj['objectClass']) != 2 or + str(obj['objectClass'][0]) != 'top' or + str(obj['objectClass'][1]) != 'container'): + self.report("ERROR: objectClass incorrectly set on Deleted Objects container %s" % obj.dn) + faulty = True + if "systemFlags" not in obj or str(obj['systemFlags'][0]) != '-1946157056': + self.report("ERROR: systemFlags incorrectly set on Deleted Objects container %s" % obj.dn) + faulty = True + return faulty + + def err_deleted_deleted_objects(self, obj): + nmsg = ldb.Message() + nmsg.dn = dn = obj.dn + + if "description" not in obj: + nmsg["description"] = ldb.MessageElement("Container for deleted objects", ldb.FLAG_MOD_REPLACE, "description") + if "showInAdvancedViewOnly" not in obj: + nmsg["showInAdvancedViewOnly"] = ldb.MessageElement("TRUE", ldb.FLAG_MOD_REPLACE, "showInAdvancedViewOnly") + if "objectCategory" not in obj: + nmsg["objectCategory"] = ldb.MessageElement("CN=Container,%s" % self.schema_dn, ldb.FLAG_MOD_REPLACE, "objectCategory") + if "isCriticalSystemObject" not in obj: + nmsg["isCriticalSystemObject"] = ldb.MessageElement("TRUE", ldb.FLAG_MOD_REPLACE, "isCriticalSystemObject") + if "isRecycled" in obj: + nmsg["isRecycled"] = ldb.MessageElement("TRUE", ldb.FLAG_MOD_DELETE, "isRecycled") + + nmsg["isDeleted"] = ldb.MessageElement("TRUE", ldb.FLAG_MOD_REPLACE, "isDeleted") + nmsg["systemFlags"] = ldb.MessageElement("-1946157056", ldb.FLAG_MOD_REPLACE, "systemFlags") + nmsg["objectClass"] = ldb.MessageElement(["top", "container"], ldb.FLAG_MOD_REPLACE, "objectClass") + + if not self.confirm_all('Fix Deleted Objects container %s by restoring default attributes?' 
+ % (dn), 'fix_deleted_deleted_objects'): + self.report('Not fixing missing/incorrect attributes on %s\n' % (dn)) + return + + if self.do_modify(nmsg, ["relax:0"], + "Failed to fix Deleted Objects container %s" % dn): + self.report("Fixed Deleted Objects container '%s'\n" % (dn)) + + def err_replica_locations(self, obj, cross_ref, attr): + nmsg = ldb.Message() + nmsg.dn = cross_ref + target = self.samdb.get_dsServiceName() + + if self.samdb.am_rodc(): + self.report('Not fixing %s %s for the RODC' % (attr, obj.dn)) + return + + if not self.confirm_all('Add yourself to the replica locations for %s?' + % (obj.dn), 'fix_replica_locations'): + self.report('Not fixing missing/incorrect attributes on %s\n' % (obj.dn)) + return + + nmsg[attr] = ldb.MessageElement(target, ldb.FLAG_MOD_ADD, attr) + if self.do_modify(nmsg, [], "Failed to add %s for %s" % (attr, obj.dn)): + self.report("Fixed %s for %s" % (attr, obj.dn)) + + def is_fsmo_role(self, dn): + if dn == self.samdb.domain_dn: + return True + if dn == self.infrastructure_dn: + return True + if dn == self.naming_dn: + return True + if dn == self.schema_dn: + return True + if dn == self.rid_dn: + return True + + return False + + def calculate_instancetype(self, dn): + instancetype = 0 + nc_root = self.samdb.get_nc_root(dn) + if dn == nc_root: + instancetype |= dsdb.INSTANCE_TYPE_IS_NC_HEAD + try: + self.samdb.search(base=dn.parent(), scope=ldb.SCOPE_BASE, attrs=[], controls=["show_recycled:1"]) + except ldb.LdbError as e4: + (enum, estr) = e4.args + if enum != ldb.ERR_NO_SUCH_OBJECT: + raise + else: + instancetype |= dsdb.INSTANCE_TYPE_NC_ABOVE + if self.write_ncs is not None and str(nc_root) in [str(x) for x in self.write_ncs]: + instancetype |= dsdb.INSTANCE_TYPE_WRITE + + return instancetype + + def get_wellknown_sd(self, dn): + for [sd_dn, descriptor_fn] in self.wellknown_sds: + if dn == sd_dn: + domain_sid = security.dom_sid(self.samdb.get_domain_sid()) + return ndr_unpack(security.descriptor, + descriptor_fn(domain_sid, + name_map=self.name_map)) + + raise KeyError + + def find_checkable_attrs(self, dn, requested_attrs): + """A helper function for check_object() that calculates the list of + attributes that need to be checked, and returns that as a list + in the original case, and a set normalised to lowercase (for + easy existence checks). 
+ """ + if requested_attrs is None: + attrs = ['*'] + else: + attrs = list(requested_attrs) + + lc_attrs = set(x.lower() for x in attrs) + + def add_attr(a): + if a.lower() not in lc_attrs: + attrs.append(a) + lc_attrs.add(a.lower()) + + if ("dn" in lc_attrs or + "distinguishedname" in lc_attrs or + dn.get_rdn_name().lower() in lc_attrs): + attrs.append("name") + lc_attrs.add('name') + + if 'name' in lc_attrs: + for a in (dn.get_rdn_name(), + "isDeleted", + "systemFlags"): + add_attr(a) + + need_replPropertyMetaData = False + if '*' in lc_attrs: + need_replPropertyMetaData = True + else: + for a in attrs: + linkID, _ = self.get_attr_linkID_and_reverse_name(a) + if linkID == 0: + continue + if linkID & 1: + continue + need_replPropertyMetaData = True + break + if need_replPropertyMetaData: + add_attr("replPropertyMetaData") + + add_attr("objectGUID") + + return attrs, lc_attrs + + def check_object(self, dn, requested_attrs=None): + """check one object""" + if self.verbose: + self.report("Checking object %s" % dn) + + # search attrs are used to find the attributes, lc_attrs are + # used for existence checks + search_attrs, lc_attrs = self.find_checkable_attrs(dn, requested_attrs) + + try: + sd_flags = 0 + sd_flags |= security.SECINFO_OWNER + sd_flags |= security.SECINFO_GROUP + sd_flags |= security.SECINFO_DACL + sd_flags |= security.SECINFO_SACL + + res = self.samdb.search(base=dn, scope=ldb.SCOPE_BASE, + controls=[ + "extended_dn:1:1", + "show_recycled:1", + "show_deleted:1", + "sd_flags:1:%d" % sd_flags, + "reveal_internals:0", + ], + attrs=search_attrs) + except ldb.LdbError as e10: + (enum, estr) = e10.args + if enum == ldb.ERR_NO_SUCH_OBJECT: + if self.in_transaction: + self.report("ERROR: Object %s disappeared during check" % dn) + return 1 + return 0 + raise + if len(res) != 1: + self.report("ERROR: Object %s failed to load during check" % dn) + return 1 + obj = res[0] + error_count = 0 + set_attrs_from_md = set() + set_attrs_seen = set() + got_objectclass = False + + nc_dn = self.samdb.get_nc_root(obj.dn) + try: + deleted_objects_dn = self.samdb.get_wellknown_dn(nc_dn, + samba.dsdb.DS_GUID_DELETED_OBJECTS_CONTAINER) + except KeyError: + # We have no deleted objects DN for schema, and we check for this above for the other + # NCs + deleted_objects_dn = None + + object_rdn_attr = None + object_rdn_val = None + name_val = None + isDeleted = False + systemFlags = 0 + repl_meta_data_val = None + + for attrname in obj: + if attrname.lower() == 'isdeleted': + if str(obj[attrname][0]) != "FALSE": + isDeleted = True + + if attrname.lower() == 'systemflags': + systemFlags = int(obj[attrname][0]) + + if attrname.lower() == 'replpropertymetadata': + repl_meta_data_val = obj[attrname][0] + + if isDeleted and repl_meta_data_val: + if self.has_changes_after_deletion(dn, repl_meta_data_val): + error_count += 1 + self.err_changes_after_deletion(dn, repl_meta_data_val) + return error_count + if self.is_expired_tombstone(dn, repl_meta_data_val): + return error_count + + for attrname in obj: + if attrname == 'dn' or attrname == "distinguishedName": + continue + + if attrname.lower() == 'objectclass': + got_objectclass = True + + if attrname.lower() == "name": + if len(obj[attrname]) != 1: + self.unfixable_errors += 1 + self.report("ERROR: Not fixing num_values(%d) for '%s' on '%s'" % + (len(obj[attrname]), attrname, str(obj.dn))) + else: + name_val = str(obj[attrname][0]) + + if attrname.lower() == str(obj.dn.get_rdn_name()).lower(): + object_rdn_attr = attrname + if len(obj[attrname]) != 1: + 
self.unfixable_errors += 1 + self.report("ERROR: Not fixing num_values(%d) for '%s' on '%s'" % + (len(obj[attrname]), attrname, str(obj.dn))) + else: + object_rdn_val = str(obj[attrname][0]) + + if attrname.lower() == 'replpropertymetadata': + if self.has_replmetadata_zero_invocationid(dn, obj[attrname][0]): + error_count += 1 + self.err_replmetadata_zero_invocationid(dn, attrname, obj[attrname][0]) + # We don't continue, as we may also have other fixes for this attribute + # based on what other attributes we see. + + try: + (set_attrs_from_md, list_attid_from_md, wrong_attids) \ + = self.process_metadata(dn, obj[attrname][0]) + except KeyError: + error_count += 1 + self.err_replmetadata_unknown_attid(dn, attrname, obj[attrname]) + continue + + if len(set_attrs_from_md) < len(list_attid_from_md) \ + or len(wrong_attids) > 0 \ + or sorted(list_attid_from_md) != list_attid_from_md: + error_count += 1 + self.err_replmetadata_incorrect_attid(dn, attrname, obj[attrname][0], wrong_attids) + + else: + # Here we check that the first attid is 0 + # (objectClass). + if list_attid_from_md[0] != 0: + self.unfixable_errors += 1 + self.report("ERROR: Not fixing incorrect initial attributeID in '%s' on '%s', it should be objectClass" % + (attrname, str(dn))) + + continue + + if attrname.lower() == 'ntsecuritydescriptor': + (sd, sd_broken) = self.process_sd(dn, obj) + if sd_broken is not None: + self.err_wrong_sd(dn, sd, sd_broken) + error_count += 1 + continue + + if sd.owner_sid is None or sd.group_sid is None: + self.err_missing_sd_owner(dn, sd) + error_count += 1 + continue + + if dn == deleted_objects_dn or self.reset_well_known_acls: + try: + well_known_sd = self.get_wellknown_sd(dn) + except KeyError: + continue + + current_sd = ndr_unpack(security.descriptor, + obj[attrname][0]) + + ignoreAdditionalACEs = False + if not self.reset_well_known_acls: + ignoreAdditionalACEs = True + + diff = get_diff_sds(well_known_sd, current_sd, + security.dom_sid(self.samdb.get_domain_sid()), + ignoreAdditionalACEs=ignoreAdditionalACEs) + if diff != "": + self.err_wrong_default_sd(dn, well_known_sd, diff) + error_count += 1 + continue + continue + + if attrname.lower() == 'objectclass': + normalised = self.samdb.dsdb_normalise_attributes(self.samdb_schema, attrname, obj[attrname]) + # Do not consider the attribute incorrect if: + # - The sorted (alphabetically) list is the same, including case + # - The first and last elements are the same + # + # This avoids triggering an error due to + # non-determinism in the sort routine in (at least) + # 4.3 and earlier, and the fact that any AUX classes + # in these attributes are also not sorted when + # imported from Windows (they are just in the reverse + # order of last set) + if sorted(normalised) != sorted(obj[attrname]) \ + or normalised[0] != obj[attrname][0] \ + or normalised[-1] != obj[attrname][-1]: + self.err_normalise_mismatch_replace(dn, attrname, list(obj[attrname])) + error_count += 1 + continue + + if attrname.lower() == 'userparameters': + userparams = obj[attrname][0] + if userparams == b' ': + error_count += 1 + self.err_short_userParameters(obj, attrname, obj[attrname]) + continue + + elif userparams[:16] == b'\x20\x00' * 8: + # This is the correct, normal prefix + continue + + elif userparams[:20] == b'IAAgACAAIAAgACAAIAAg': + # this is the typical prefix from a windows migration + error_count += 1 + self.err_base64_userParameters(obj, attrname, obj[attrname]) + continue + + #43:00:00:00:74:00:00:00:78 + elif (userparams[1] != 0 and + userparams[3] != 0 
and + userparams[5] != 0 and + userparams[7] != 0 and + userparams[9] != 0): + # This is a prefix that is not in UTF-16 format + # for the space or munged dialback prefix + error_count += 1 + self.err_utf8_userParameters(obj, attrname, obj[attrname]) + continue + + elif len(userparams) % 2 != 0: + # This is a value that isn't of even length + error_count += 1 + self.err_odd_userParameters(obj, attrname) + continue + + elif (userparams[1] == 0 and + userparams[2] == 0 and + userparams[3] == 0 and + userparams[4] != 0 and + userparams[5] == 0): + # This is a prefix that would happen if a + # SAMR-written value was replicated from a Samba + # 4.1 server to a working server + error_count += 1 + self.err_doubled_userParameters(obj, attrname, obj[attrname]) + continue + + if attrname.lower() == 'attributeid' or attrname.lower() == 'governsid': + if obj[attrname][0] in self.attribute_or_class_ids: + self.unfixable_errors += 1 + self.report('Error: %s %s on %s already exists as an attributeId or governsId' + % (attrname, obj[attrname][0], obj.dn)) + else: + self.attribute_or_class_ids.add(obj[attrname][0]) + + # check for empty attributes + for val in obj[attrname]: + if val == b'': + self.err_empty_attribute(dn, attrname) + error_count += 1 + continue + + # get the syntax oid for the attribute, so we can have + # special handling for some specific attribute types + try: + syntax_oid = self.samdb_schema.get_syntax_oid_from_lDAPDisplayName(attrname) + except Exception as msg: + self.err_unknown_attribute(obj, attrname) + error_count += 1 + continue + + linkID, reverse_link_name = self.get_attr_linkID_and_reverse_name(attrname) + + flag = self.samdb_schema.get_systemFlags_from_lDAPDisplayName(attrname) + if (not flag & dsdb.DS_FLAG_ATTR_NOT_REPLICATED + and not flag & dsdb.DS_FLAG_ATTR_IS_CONSTRUCTED + and not linkID): + set_attrs_seen.add(attrname.lower()) + + if syntax_oid in [dsdb.DSDB_SYNTAX_BINARY_DN, dsdb.DSDB_SYNTAX_OR_NAME, + dsdb.DSDB_SYNTAX_STRING_DN, ldb.SYNTAX_DN]: + # it's some form of DN, do specialised checking on those + error_count += self.check_dn(obj, attrname, syntax_oid) + else: + + values = set() + # check for incorrectly normalised attributes + for val in obj[attrname]: + values.add(val) + + normalised = self.samdb.dsdb_normalise_attributes(self.samdb_schema, attrname, [val]) + if len(normalised) != 1 or normalised[0] != val: + self.err_normalise_mismatch(dn, attrname, obj[attrname]) + error_count += 1 + break + + if len(obj[attrname]) != len(values): + self.err_duplicate_values(dn, attrname, obj[attrname], list(values)) + error_count += 1 + break + + if attrname.lower() == "instancetype": + calculated_instancetype = self.calculate_instancetype(dn) + if len(obj["instanceType"]) != 1 or int(obj["instanceType"][0]) != calculated_instancetype: + error_count += 1 + self.err_wrong_instancetype(obj, calculated_instancetype) + + if not got_objectclass and ("*" in lc_attrs or "objectclass" in lc_attrs): + error_count += 1 + self.err_missing_objectclass(dn) + + if ("*" in lc_attrs or "name" in lc_attrs): + if name_val is None: + self.unfixable_errors += 1 + self.report("ERROR: Not fixing missing 'name' on '%s'" % (str(obj.dn))) + if object_rdn_attr is None: + self.unfixable_errors += 1 + self.report("ERROR: Not fixing missing '%s' on '%s'" % (obj.dn.get_rdn_name(), str(obj.dn))) + + if name_val is not None: + parent_dn = None + controls = ["show_recycled:1", "relax:0"] + if isDeleted: + if not (systemFlags & samba.dsdb.SYSTEM_FLAG_DISALLOW_MOVE_ON_DELETE): + parent_dn = 
deleted_objects_dn + controls += ["local_oid:%s:1" % dsdb.DSDB_CONTROL_DBCHECK_FIX_LINK_DN_NAME] + if parent_dn is None: + parent_dn = obj.dn.parent() + + try: + expected_dn = ldb.Dn(self.samdb, "RDN=RDN,%s" % (parent_dn)) + except ValueError as e: + self.unfixable_errors += 1 + self.report(f"ERROR: could not handle parent DN '{parent_dn}': " + "skipping RDN checks") + else: + expected_dn.set_component(0, obj.dn.get_rdn_name(), name_val) + + if obj.dn == deleted_objects_dn: + expected_dn = obj.dn + + if expected_dn != obj.dn: + error_count += 1 + self.err_wrong_dn(obj, expected_dn, object_rdn_attr, + object_rdn_val, name_val, controls) + elif obj.dn.get_rdn_value() != object_rdn_val: + self.unfixable_errors += 1 + self.report("ERROR: Not fixing %s=%r on '%s'" % (object_rdn_attr, + object_rdn_val, + obj.dn)) + + show_dn = True + if repl_meta_data_val: + if obj.dn == deleted_objects_dn: + isDeletedAttId = 131120 + # It's 29/12/9999 at 23:59:59 UTC as specified in MS-ADTS 7.1.1.4.2 Deleted Objects Container + + expectedTimeDo = 2650466015990000000 + originating = self.get_originating_time(repl_meta_data_val, isDeletedAttId) + if originating != expectedTimeDo: + if self.confirm_all("Fix isDeleted originating_change_time on '%s'" % str(dn), 'fix_time_metadata'): + nmsg = ldb.Message() + nmsg.dn = dn + nmsg["isDeleted"] = ldb.MessageElement("TRUE", ldb.FLAG_MOD_REPLACE, "isDeleted") + error_count += 1 + self.samdb.modify(nmsg, controls=["provision:0"]) + + else: + self.report("Not fixing isDeleted originating_change_time on '%s'" % str(dn)) + + for att in set_attrs_seen.difference(set_attrs_from_md): + if show_dn: + self.report("On object %s" % dn) + show_dn = False + error_count += 1 + self.report("ERROR: Attribute %s not present in replication metadata" % att) + if not self.confirm_all("Fix missing replPropertyMetaData element '%s'" % att, 'fix_all_metadata'): + self.report("Not fixing missing replPropertyMetaData element '%s'" % att) + continue + self.fix_metadata(obj, att) + + if self.is_fsmo_role(dn): + if "fSMORoleOwner" not in obj and ("*" in lc_attrs or "fsmoroleowner" in lc_attrs): + self.err_no_fsmoRoleOwner(obj) + error_count += 1 + + try: + if dn != self.samdb.get_root_basedn() and str(dn.parent()) not in self.dn_set: + res = self.samdb.search(base=dn.parent(), scope=ldb.SCOPE_BASE, + controls=["show_recycled:1", "show_deleted:1"]) + except ldb.LdbError as e11: + (enum, estr) = e11.args + if enum == ldb.ERR_NO_SUCH_OBJECT: + if isDeleted: + self.report("WARNING: parent object not found for %s" % (obj.dn)) + self.report("Not moving to LostAndFound " + "(tombstone garbage collection in progress?)") + else: + self.err_missing_parent(obj) + error_count += 1 + else: + raise + + if dn in self.deleted_objects_containers and '*' in lc_attrs: + if self.is_deleted_deleted_objects(obj): + self.err_deleted_deleted_objects(obj) + error_count += 1 + + for (dns_part, msg) in self.dns_partitions: + if dn == dns_part and 'repsFrom' in obj: + location = "msDS-NC-Replica-Locations" + if self.samdb.am_rodc(): + location = "msDS-NC-RO-Replica-Locations" + + if location not in msg: + # There are no replica locations! 
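The replica-location handling here asks whether this DC's own nTDSDSA DN (its dsServiceName) appears among the multi-valued replica-location attribute of the DNS partition's crossRef. A stand-alone sketch of that membership test, with invented example DNs (no Samba libraries assumed):

def replica_location_missing(locations, ds_service_name):
    """True if ds_service_name is not listed in the multi-valued attribute."""
    return all(str(loc) != ds_service_name for loc in locations)

example_locations = [
    "CN=NTDS Settings,CN=DC1,CN=Servers,CN=Default-First-Site-Name,"
    "CN=Sites,CN=Configuration,DC=example,DC=com",
]
me = ("CN=NTDS Settings,CN=DC2,CN=Servers,CN=Default-First-Site-Name,"
      "CN=Sites,CN=Configuration,DC=example,DC=com")
assert replica_location_missing(example_locations, me)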
+ self.err_replica_locations(obj, msg.dn, location) + error_count += 1 + continue + + found = False + for loc in msg[location]: + if str(loc) == self.samdb.get_dsServiceName(): + found = True + if not found: + # This DC is not in the replica locations + self.err_replica_locations(obj, msg.dn, location) + error_count += 1 + + if dn == self.server_ref_dn: + # Check we have a valid RID Set + if "*" in lc_attrs or "ridsetreferences" in lc_attrs: + if "rIDSetReferences" not in obj: + # NO RID SET reference + # We are RID master, allocate it. + error_count += 1 + + if self.is_rid_master: + # Allocate a RID Set + if self.confirm_all('Allocate the missing RID set for ' + 'RID master?', + 'fix_missing_rid_set_master'): + + # We don't have auto-transaction logic on + # extended operations, so we have to do it + # here. + + self.samdb.transaction_start() + + try: + self.samdb.create_own_rid_set() + + except: + self.samdb.transaction_cancel() + raise + + self.samdb.transaction_commit() + + elif not self.samdb.am_rodc(): + self.report("No RID Set found for this server: %s, " + "and we are not the RID Master (so can " + "not self-allocate)" % dn) + + # Check some details of our own RID Set + # + # Note that the attributes have very bad names. From ridalloc.c: + # + # Note: the RID allocation attributes in AD are very badly named. + # Here is what we think they really do: + # + # in RID Set object: + # - rIDPreviousAllocationPool: the pool which a DC is currently + # pulling RIDs from. Managed by client DC + # + # - rIDAllocationPool: the pool that the DC will switch to next, + # when rIDPreviousAllocationPool is exhausted. Managed by RID + # Manager. + # + # - rIDNextRID: the last RID allocated by this DC. Managed by + # client DC + # + # in RID Manager object: + # - rIDAvailablePool: the pool where the RID Manager gets new rID + # pools from when it gets a EXOP_RID_ALLOC getncchanges call + # (or locally when the DC is the RID Manager) + + if dn == self.rid_set_dn: + pool_attrs = ["rIDAllocationPool", "rIDPreviousAllocationPool"] + + res = self.samdb.search(base=self.rid_set_dn, scope=ldb.SCOPE_BASE, + attrs=pool_attrs) + + for pool_attr in pool_attrs: + if pool_attr not in res[0]: + continue + + pool = int(res[0][pool_attr][0]) + + high = pool >> 32 + low = 0xFFFFFFFF & pool + + if pool != 0 and low >= high: + self.report("Invalid RID pool %d-%d, %d >= %d!" % + (low, high, low, high)) + self.unfixable_errors += 1 + + if "rIDAllocationPool" not in res[0]: + self.report("No rIDAllocationPool found in %s" % dn) + self.unfixable_errors += 1 + + try: + next_free_rid, high = self.samdb.free_rid_bounds() + except ldb.LdbError as err: + enum, estr = err.args + self.report("Couldn't get available RIDs: %s" % estr) + self.unfixable_errors += 1 + else: + # Check the remainder of this pool for conflicts. If + # ridalloc_allocate_rid() moves to a new pool, this + # will be above high, so we will stop. + domain_sid = self.samdb.get_domain_sid() + while next_free_rid <= high: + sid = "%s-%d" % (domain_sid, next_free_rid) + try: + res = self.samdb.search(base="<SID=%s>" % sid, + scope=ldb.SCOPE_BASE, + attrs=[]) + except ldb.LdbError as e: + (enum, estr) = e.args + if enum != ldb.ERR_NO_SUCH_OBJECT: + raise + res = None + if res is not None: + self.report("SID %s for %s conflicts with our current " + "RID set in %s" % (sid, res[0].dn, dn)) + error_count += 1 + + if self.confirm_all('Fix conflict between SID %s and ' + 'RID pool in %s by allocating a ' + 'new RID?' 
+ % (sid, dn), + 'fix_sid_rid_set_conflict'): + self.samdb.transaction_start() + + # This will burn RIDs, which will move + # past the conflict. We then check again + # to see if the new RID conflicts, until + # the end of the current pool. We don't + # look at the next pool to avoid burning + # all RIDs in one go in some strange + # failure case. + try: + while True: + allocated_rid = self.samdb.allocate_rid() + if allocated_rid >= next_free_rid: + next_free_rid = allocated_rid + 1 + break + except: + self.samdb.transaction_cancel() + raise + + self.samdb.transaction_commit() + else: + break + else: + next_free_rid += 1 + + return error_count + + ################################################################ + # check special @ROOTDSE attributes + def check_rootdse(self): + """check the @ROOTDSE special object""" + dn = ldb.Dn(self.samdb, '@ROOTDSE') + if self.verbose: + self.report("Checking object %s" % dn) + res = self.samdb.search(base=dn, scope=ldb.SCOPE_BASE) + if len(res) != 1: + self.report("Object %s disappeared during check" % dn) + return 1 + obj = res[0] + error_count = 0 + + # check that the dsServiceName is in GUID form + if 'dsServiceName' not in obj: + self.report('ERROR: dsServiceName missing in @ROOTDSE') + return error_count + 1 + + if not str(obj['dsServiceName'][0]).startswith('" % guid_str, + ldb.FLAG_MOD_REPLACE, 'dsServiceName') + if self.do_modify(m, [], "Failed to change dsServiceName to GUID form", validate=False): + self.report("Changed dsServiceName to GUID form") + return error_count + + ############################################### + # re-index the database + + def reindex_database(self): + """re-index the whole database""" + m = ldb.Message() + m.dn = ldb.Dn(self.samdb, "@ATTRIBUTES") + m['add'] = ldb.MessageElement('NONE', ldb.FLAG_MOD_ADD, 'force_reindex') + m['delete'] = ldb.MessageElement('NONE', ldb.FLAG_MOD_DELETE, 'force_reindex') + return self.do_modify(m, [], 're-indexed database', validate=False) + + ############################################### + # reset @MODULES + def reset_modules(self): + """reset @MODULES to that needed for current sam.ldb (to read a very old database)""" + m = ldb.Message() + m.dn = ldb.Dn(self.samdb, "@MODULES") + m['@LIST'] = ldb.MessageElement('samba_dsdb', ldb.FLAG_MOD_REPLACE, '@LIST') + return self.do_modify(m, [], 'reset @MODULES on database', validate=False) diff --git a/python/samba/descriptor.py b/python/samba/descriptor.py new file mode 100644 index 0000000..362510c --- /dev/null +++ b/python/samba/descriptor.py @@ -0,0 +1,723 @@ + +# Unix SMB/CIFS implementation. +# backend code for provisioning a Samba4 server + +# Copyright (C) Jelmer Vernooij 2007-2010 +# Copyright (C) Andrew Bartlett 2008-2009 +# Copyright (C) Oliver Liebel 2008-2009 +# Copyright (C) Amitay Isaacs 2011 +# +# Based on the original in EJS: +# Copyright (C) Andrew Tridgell 2005 +# +# This program is free software; you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation; either version 3 of the License, or +# (at your option) any later version. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with this program. If not, see . 
+# + +"""Functions for setting up a Samba configuration (security descriptors).""" + +from samba.dcerpc import security +from samba.ndr import ndr_pack +from samba.schema import get_schema_descriptor +import ldb +import re + +# Descriptors of naming contexts and other important objects + + +def sddl2binary(sddl_in, domain_sid, name_map): + sddl = "%s" % sddl_in + + for [name, sid] in name_map.items(): + sddl = sddl.replace(name, sid) + + sec = security.descriptor.from_sddl(sddl, domain_sid) + return ndr_pack(sec) + + +def get_empty_descriptor(domain_sid, name_map=None): + if name_map is None: + name_map = {} + + sddl = "" + return sddl2binary(sddl, domain_sid, name_map) + +# "get_schema_descriptor" is located in "schema.py" + + +def get_deletedobjects_descriptor(domain_sid, name_map=None): + if name_map is None: + name_map = {} + + sddl = "O:SYG:SYD:PAI" \ + "(A;;RPWPCCDCLCRCWOWDSDSW;;;SY)" \ + "(A;;RPLC;;;BA)" + return sddl2binary(sddl, domain_sid, name_map) + + +def get_config_descriptor(domain_sid, name_map=None): + if name_map is None: + name_map = {} + + sddl = "O:EAG:EAD:(OA;;CR;1131f6aa-9c07-11d1-f79f-00c04fc2dcd2;;ED)" \ + "(OA;;CR;1131f6ab-9c07-11d1-f79f-00c04fc2dcd2;;ED)" \ + "(OA;;CR;1131f6ac-9c07-11d1-f79f-00c04fc2dcd2;;ED)" \ + "(OA;;CR;1131f6aa-9c07-11d1-f79f-00c04fc2dcd2;;BA)" \ + "(OA;;CR;1131f6ab-9c07-11d1-f79f-00c04fc2dcd2;;BA)" \ + "(OA;;CR;1131f6ac-9c07-11d1-f79f-00c04fc2dcd2;;BA)" \ + "(A;;RPLCLORC;;;AU)(A;CI;RPWPCRCCDCLCLORCWOWDSDDTSW;;;EA)" \ + "(A;;RPWPCRCCDCLCLORCWOWDSDDTSW;;;SY)(A;CIIO;RPWPCRCCLCLORCWOWDSDSW;;;DA)" \ + "(OA;;CR;1131f6ad-9c07-11d1-f79f-00c04fc2dcd2;;ED)" \ + "(OA;;CR;89e95b76-444d-4c62-991a-0facbeda640c;;ED)" \ + "(OA;;CR;1131f6ad-9c07-11d1-f79f-00c04fc2dcd2;;BA)" \ + "(OA;;CR;89e95b76-444d-4c62-991a-0facbeda640c;;BA)" \ + "(OA;;CR;1131f6aa-9c07-11d1-f79f-00c04fc2dcd2;;RO)" \ + "S:(AU;SA;WPWOWD;;;WD)(AU;SA;CR;;;BA)(AU;SA;CR;;;DU)" \ + "(OU;SA;CR;45ec5156-db7e-47bb-b53f-dbeb2d03c40f;;WD)" + return sddl2binary(sddl, domain_sid, name_map) + + +def get_config_partitions_descriptor(domain_sid, name_map=None): + if name_map is None: + name_map = {} + + sddl = "D:" \ + "(A;;LCLORC;;;AU)" \ + "(OA;;RP;e48d0154-bcf8-11d1-8702-00c04fb96050;;AU)" \ + "(OA;;RP;d31a8757-2447-4545-8081-3bb610cacbf2;;AU)" \ + "(OA;;RP;66171887-8f3c-11d0-afda-00c04fd930c9;;AU)" \ + "(OA;;RP;032160bf-9824-11d1-aec0-0000f80367c1;;AU)" \ + "(OA;;RP;789ee1eb-8c8e-4e4c-8cec-79b31b7617b5;;AU)" \ + "(OA;;RP;5706aeaf-b940-4fb2-bcfc-5268683ad9fe;;AU)" \ + "(A;;RPWPCRCCLCLORCWOWDSW;;;EA)" \ + "(A;;RPWPCRCCDCLCLORCWOWDSDDTSW;;;SY)" \ + "(A;;CC;;;ED)" \ + "(OA;CIIO;WP;3df793df-9858-4417-a701-735a1ecebf74;bf967a8d-0de6-11d0-a285-00aa003049e2;BA)" \ + "S:" \ + "(AU;CISA;WPCRCCDCWOWDSDDT;;;WD)" + return sddl2binary(sddl, domain_sid, name_map) + + +def get_config_sites_descriptor(domain_sid, name_map=None): + if name_map is None: + name_map = {} + + sddl = "D:" \ + "(A;;RPLCLORC;;;AU)" \ + "(OA;CIIO;SW;d31a8757-2447-4545-8081-3bb610cacbf2;f0f8ffab-1191-11d0-a060-00aa006c33ed;RO)" \ + "(A;;RPWPCRCCLCLORCWOWDSW;;;EA)" \ + "(A;;RPWPCRCCDCLCLORCWOWDSDDTSW;;;SY)" \ + "S:" \ + "(AU;CISA;CCDCSDDT;;;WD)" \ + "(OU;CIIOSA;CR;;f0f8ffab-1191-11d0-a060-00aa006c33ed;WD)" \ + "(OU;CIIOSA;WP;f30e3bbe-9ff0-11d1-b603-0000f80367c1;bf967ab3-0de6-11d0-a285-00aa003049e2;WD)" \ + "(OU;CIIOSA;WP;f30e3bbf-9ff0-11d1-b603-0000f80367c1;bf967ab3-0de6-11d0-a285-00aa003049e2;WD)" \ + "(OU;CIIOSA;WP;3e10944c-c354-11d0-aff8-0000f80367c1;b7b13124-b82e-11d0-afee-0000f80367c1;WD)" + return sddl2binary(sddl, domain_sid, name_map) + 
+ +def get_config_ntds_quotas_descriptor(domain_sid, name_map=None): + if name_map is None: + name_map = {} + + sddl = "D:" \ + "(A;;RPWPCRCCDCLCLORCWOWDSDDTSW;;;EA)" \ + "(A;;RPLCLORC;;;BA)" \ + "(OA;;CR;4ecc03fe-ffc0-4947-b630-eb672a8a9dbc;;WD)" + return sddl2binary(sddl, domain_sid, name_map) + + +def get_config_delete_protected1_descriptor(domain_sid, name_map=None): + if name_map is None: + name_map = {} + + sddl = "D:AI" \ + "(A;;RPLCLORC;;;AU)" \ + "(A;;RPWPCRCCLCLORCWOWDSW;;;EA)" \ + "(A;;RPWPCRCCDCLCLORCWOWDSDDTSW;;;SY)" + return sddl2binary(sddl, domain_sid, name_map) + + +def get_config_delete_protected1wd_descriptor(domain_sid, name_map=None): + if name_map is None: + name_map = {} + + sddl = "D:AI" \ + "(A;;RPLCLORC;;;WD)" \ + "(A;;RPWPCRCCLCLORCWOWDSW;;;EA)" \ + "(A;;RPWPCRCCDCLCLORCWOWDSDDTSW;;;SY)" + return sddl2binary(sddl, domain_sid, name_map) + + +def get_config_delete_protected2_descriptor(domain_sid, name_map=None): + if name_map is None: + name_map = {} + + sddl = "D:AI" \ + "(A;;RPLCLORC;;;AU)" \ + "(A;;RPWPCRCCDCLCLORCWOWDSW;;;EA)" \ + "(A;;RPWPCRCCDCLCLORCWOWDSDDTSW;;;SY)" + return sddl2binary(sddl, domain_sid, name_map) + + +def get_domain_descriptor(domain_sid, name_map=None): + if name_map is None: + name_map = {} + + sddl = "O:BAG:BAD:AI(OA;CIIO;RP;4c164200-20c0-11d0-a768-00aa006e0529;4828cc14-1437-45bc-9b07-ad6f015e5f28;RU)" \ + "(OA;CIIO;RP;4c164200-20c0-11d0-a768-00aa006e0529;bf967aba-0de6-11d0-a285-00aa003049e2;RU)" \ + "(OA;CIIO;RP;5f202010-79a5-11d0-9020-00c04fc2d4cf;4828cc14-1437-45bc-9b07-ad6f015e5f28;RU)" \ + "(OA;CIIO;RP;5f202010-79a5-11d0-9020-00c04fc2d4cf;bf967aba-0de6-11d0-a285-00aa003049e2;RU)" \ + "(OA;CIIO;RP;bc0ac240-79a9-11d0-9020-00c04fc2d4cf;4828cc14-1437-45bc-9b07-ad6f015e5f28;RU)" \ + "(OA;CIIO;RP;bc0ac240-79a9-11d0-9020-00c04fc2d4cf;bf967aba-0de6-11d0-a285-00aa003049e2;RU)" \ + "(OA;CIIO;RP;59ba2f42-79a2-11d0-9020-00c04fc2d3cf;4828cc14-1437-45bc-9b07-ad6f015e5f28;RU)" \ + "(OA;CIIO;RP;59ba2f42-79a2-11d0-9020-00c04fc2d3cf;bf967aba-0de6-11d0-a285-00aa003049e2;RU)" \ + "(OA;CIIO;RP;037088f8-0ae1-11d2-b422-00a0c968f939;4828cc14-1437-45bc-9b07-ad6f015e5f28;RU)" \ + "(OA;CIIO;RP;037088f8-0ae1-11d2-b422-00a0c968f939;bf967aba-0de6-11d0-a285-00aa003049e2;RU)" \ + "(OA;;CR;1131f6aa-9c07-11d1-f79f-00c04fc2dcd2;;RO)" \ + "(OA;;CR;1131f6ad-9c07-11d1-f79f-00c04fc2dcd2;;DD)" \ + "(OA;CIIO;RP;b7c69e6d-2cc7-11d2-854e-00a0c983f608;bf967a86-0de6-11d0-a285-00aa003049e2;ED)" \ + "(OA;CIIO;RP;b7c69e6d-2cc7-11d2-854e-00a0c983f608;bf967a9c-0de6-11d0-a285-00aa003049e2;ED)" \ + "(OA;CIIO;RP;b7c69e6d-2cc7-11d2-854e-00a0c983f608;bf967aba-0de6-11d0-a285-00aa003049e2;ED)" \ + "(OA;;CR;89e95b76-444d-4c62-991a-0facbeda640c;;BA)" \ + "(OA;;CR;1131f6aa-9c07-11d1-f79f-00c04fc2dcd2;;BA)" \ + "(OA;;CR;1131f6ab-9c07-11d1-f79f-00c04fc2dcd2;;BA)" \ + "(OA;;CR;1131f6ac-9c07-11d1-f79f-00c04fc2dcd2;;BA)" \ + "(OA;;CR;1131f6ad-9c07-11d1-f79f-00c04fc2dcd2;;BA)" \ + "(OA;;CR;1131f6ae-9c07-11d1-f79f-00c04fc2dcd2;;BA)" \ + "(OA;;CR;e2a36dc9-ae17-47c3-b58b-be34c55ba633;;S-1-5-32-557)" \ + "(OA;;RP;c7407360-20bf-11d0-a768-00aa006e0529;;RU)" \ + "(OA;;RP;b8119fd0-04f6-4762-ab7a-4986c76b3f9a;;RU)" \ + "(OA;CIIO;RPLCLORC;;4828cc14-1437-45bc-9b07-ad6f015e5f28;RU)" \ + "(OA;CIIO;RPLCLORC;;bf967a9c-0de6-11d0-a285-00aa003049e2;RU)" \ + "(OA;CIIO;RPLCLORC;;bf967aba-0de6-11d0-a285-00aa003049e2;RU)" \ + "(OA;;CR;05c74c5e-4deb-43b4-bd9f-86664c2a7fd5;;AU)" \ + "(OA;;CR;89e95b76-444d-4c62-991a-0facbeda640c;;ED)" \ + "(OA;;CR;ccc2dc7d-a6ad-4a7a-8846-c04e3cc53501;;AU)" \ + 
"(OA;;CR;280f369c-67c7-438e-ae98-1d46f3c6f541;;AU)" \ + "(OA;;CR;1131f6aa-9c07-11d1-f79f-00c04fc2dcd2;;ED)" \ + "(OA;;CR;1131f6ab-9c07-11d1-f79f-00c04fc2dcd2;;ED)" \ + "(OA;;CR;1131f6ac-9c07-11d1-f79f-00c04fc2dcd2;;ED)" \ + "(OA;;CR;1131f6ae-9c07-11d1-f79f-00c04fc2dcd2;;ED)" \ + "(OA;;RP;b8119fd0-04f6-4762-ab7a-4986c76b3f9a;;AU)" \ + "(OA;CIIO;RPWPCR;91e647de-d96f-4b70-9557-d63ff4f3ccd8;;PS)" \ + "(OA;CIIO;WP;ea1b7b93-5e48-46d5-bc6c-4df4fda78a35;bf967a86-0de6-11d0-a285-00aa003049e2;PS)" \ + "(OA;;CR;3e0f7e18-2c7a-4c10-ba82-4d926db99a3e;;CN)" \ + "(OA;OICI;RPWP;3f78c3e5-f79a-46bd-a0b8-9d18116ddc79;;PS)" \ + "(OA;CI;RPWP;5b47d60f-6090-40b2-9f37-2a4de88f3063;;KA)" \ + "(OA;CI;RPWP;5b47d60f-6090-40b2-9f37-2a4de88f3063;;EK)" \ + "(OA;CIIO;SW;9b026da6-0d3c-465c-8bee-5199d7165cba;bf967a86-0de6-11d0-a285-00aa003049e2;PS)" \ + "(OA;CIIO;SW;9b026da6-0d3c-465c-8bee-5199d7165cba;bf967a86-0de6-11d0-a285-00aa003049e2;CO)" \ + "(A;;RPWPCRCCLCLORCWOWDSW;;;DA)" \ + "(A;CI;RPWPCRCCDCLCLORCWOWDSDDTSW;;;EA)" \ + "(A;;RPRC;;;RU)" \ + "(A;CI;LC;;;RU)" \ + "(A;CI;RPWPCRCCLCLORCWOWDSDSW;;;BA)" \ + "(A;;RP;;;WD)" \ + "(A;;RPLCLORC;;;ED)" \ + "(A;;RPLCLORC;;;AU)" \ + "(A;;RPWPCRCCDCLCLORCWOWDSDDTSW;;;SY)" \ + "S:AI(OU;CISA;WP;f30e3bbe-9ff0-11d1-b603-0000f80367c1;bf967aa5-0de6-11d0-a285-00aa003049e2;WD)" \ + "(OU;CISA;WP;f30e3bbf-9ff0-11d1-b603-0000f80367c1;bf967aa5-0de6-11d0-a285-00aa003049e2;WD)" \ + "(AU;SA;CR;;;DU)(AU;SA;CR;;;BA)(AU;SA;WPWOWD;;;WD)" + return sddl2binary(sddl, domain_sid, name_map) + + +def get_domain_infrastructure_descriptor(domain_sid, name_map=None): + if name_map is None: + name_map = {} + + sddl = "D:" \ + "(A;;RPLCLORC;;;AU)" \ + "(A;;RPWPCRCCLCLORCWOWDSW;;;DA)" \ + "(A;;RPWPCRCCDCLCLORCWOWDSDDTSW;;;SY)" \ + "S:" \ + "(AU;SA;WPCR;;;WD)" + return sddl2binary(sddl, domain_sid, name_map) + + +def get_domain_builtin_descriptor(domain_sid, name_map=None): + if name_map is None: + name_map = {} + + sddl = "D:" \ + "(OA;CIIO;RP;4c164200-20c0-11d0-a768-00aa006e0529;4828cc14-1437-45bc-9b07-ad6f015e5f28;RU)" \ + "(OA;CIIO;RP;4c164200-20c0-11d0-a768-00aa006e0529;bf967aba-0de6-11d0-a285-00aa003049e2;RU)" \ + "(OA;CIIO;RP;5f202010-79a5-11d0-9020-00c04fc2d4cf;4828cc14-1437-45bc-9b07-ad6f015e5f28;RU)" \ + "(OA;CIIO;RP;5f202010-79a5-11d0-9020-00c04fc2d4cf;bf967aba-0de6-11d0-a285-00aa003049e2;RU)" \ + "(OA;CIIO;RP;bc0ac240-79a9-11d0-9020-00c04fc2d4cf;4828cc14-1437-45bc-9b07-ad6f015e5f28;RU)" \ + "(OA;CIIO;RP;bc0ac240-79a9-11d0-9020-00c04fc2d4cf;bf967aba-0de6-11d0-a285-00aa003049e2;RU)" \ + "(OA;CIIO;RP;59ba2f42-79a2-11d0-9020-00c04fc2d3cf;4828cc14-1437-45bc-9b07-ad6f015e5f28;RU)" \ + "(OA;CIIO;RP;59ba2f42-79a2-11d0-9020-00c04fc2d3cf;bf967aba-0de6-11d0-a285-00aa003049e2;RU)" \ + "(OA;CIIO;RP;037088f8-0ae1-11d2-b422-00a0c968f939;4828cc14-1437-45bc-9b07-ad6f015e5f28;RU)" \ + "(OA;CIIO;RP;037088f8-0ae1-11d2-b422-00a0c968f939;bf967aba-0de6-11d0-a285-00aa003049e2;RU)" \ + "(OA;;CR;1131f6aa-9c07-11d1-f79f-00c04fc2dcd2;;RO)" \ + "(OA;;CR;1131f6ad-9c07-11d1-f79f-00c04fc2dcd2;;DD)" \ + "(OA;CIIO;RP;b7c69e6d-2cc7-11d2-854e-00a0c983f608;bf967a86-0de6-11d0-a285-00aa003049e2;ED)" \ + "(OA;CIIO;RP;b7c69e6d-2cc7-11d2-854e-00a0c983f608;bf967a9c-0de6-11d0-a285-00aa003049e2;ED)" \ + "(OA;CIIO;RP;b7c69e6d-2cc7-11d2-854e-00a0c983f608;bf967aba-0de6-11d0-a285-00aa003049e2;ED)" \ + "(OA;;CR;89e95b76-444d-4c62-991a-0facbeda640c;;BA)" \ + "(OA;;CR;1131f6aa-9c07-11d1-f79f-00c04fc2dcd2;;BA)" \ + "(OA;;CR;1131f6ab-9c07-11d1-f79f-00c04fc2dcd2;;BA)" \ + "(OA;;CR;1131f6ac-9c07-11d1-f79f-00c04fc2dcd2;;BA)" \ + 
"(OA;;CR;1131f6ad-9c07-11d1-f79f-00c04fc2dcd2;;BA)" \ + "(OA;;CR;1131f6ae-9c07-11d1-f79f-00c04fc2dcd2;;BA)" \ + "(OA;;CR;e2a36dc9-ae17-47c3-b58b-be34c55ba633;;S-1-5-32-557)" \ + "(OA;;RP;c7407360-20bf-11d0-a768-00aa006e0529;;RU)" \ + "(OA;;RP;b8119fd0-04f6-4762-ab7a-4986c76b3f9a;;RU)" \ + "(OA;CIIO;RPLCLORC;;4828cc14-1437-45bc-9b07-ad6f015e5f28;RU)" \ + "(OA;CIIO;RPLCLORC;;bf967a9c-0de6-11d0-a285-00aa003049e2;RU)" \ + "(OA;CIIO;RPLCLORC;;bf967aba-0de6-11d0-a285-00aa003049e2;RU)" \ + "(OA;;CR;05c74c5e-4deb-43b4-bd9f-86664c2a7fd5;;AU)" \ + "(OA;;CR;89e95b76-444d-4c62-991a-0facbeda640c;;ED)" \ + "(OA;;CR;ccc2dc7d-a6ad-4a7a-8846-c04e3cc53501;;AU)" \ + "(OA;;CR;280f369c-67c7-438e-ae98-1d46f3c6f541;;AU)" \ + "(OA;;CR;1131f6aa-9c07-11d1-f79f-00c04fc2dcd2;;ED)" \ + "(OA;;CR;1131f6ab-9c07-11d1-f79f-00c04fc2dcd2;;ED)" \ + "(OA;;CR;1131f6ac-9c07-11d1-f79f-00c04fc2dcd2;;ED)" \ + "(OA;;CR;1131f6ae-9c07-11d1-f79f-00c04fc2dcd2;;ED)" \ + "(OA;;RP;b8119fd0-04f6-4762-ab7a-4986c76b3f9a;;AU)" \ + "(OA;CIIO;RPWPCR;91e647de-d96f-4b70-9557-d63ff4f3ccd8;;PS)" \ + "(OA;CIIO;SW;9b026da6-0d3c-465c-8bee-5199d7165cba;bf967a86-0de6-11d0-a285-00aa003049e2;CO)" \ + "(OA;OICI;RPWP;3f78c3e5-f79a-46bd-a0b8-9d18116ddc79;;PS)" \ + "(OA;CIIO;SW;9b026da6-0d3c-465c-8bee-5199d7165cba;bf967a86-0de6-11d0-a285-00aa003049e2;PS)" \ + "(OA;CIIO;WP;ea1b7b93-5e48-46d5-bc6c-4df4fda78a35;bf967a86-0de6-11d0-a285-00aa003049e2;PS)" \ + "(A;;RPWPCRCCLCLORCWOWDSW;;;DA)" \ + "(A;CI;RPWPCRCCDCLCLORCWOWDSDDTSW;;;EA)" \ + "(A;;RPRC;;;RU)" \ + "(A;CI;LC;;;RU)" \ + "(A;CI;RPWPCRCCLCLORCWOWDSDSW;;;BA)" \ + "(A;;RP;;;WD)" \ + "(A;;RPLCLORC;;;ED)" \ + "(A;;RPLCLORC;;;AU)" \ + "(A;;RPWPCRCCDCLCLORCWOWDSDDTSW;;;SY)" \ + "S:" \ + "(OU;CISA;WP;f30e3bbe-9ff0-11d1-b603-0000f80367c1;bf967aa5-0de6-11d0-a285-00aa003049e2;WD)" \ + "(OU;CISA;WP;f30e3bbf-9ff0-11d1-b603-0000f80367c1;bf967aa5-0de6-11d0-a285-00aa003049e2;WD)" \ + "(AU;SA;CR;;;DU)" \ + "(AU;SA;CR;;;BA)" \ + "(AU;SA;WPWOWD;;;WD)" + return sddl2binary(sddl, domain_sid, name_map) + + +def get_domain_computers_descriptor(domain_sid, name_map=None): + if name_map is None: + name_map = {} + + sddl = "D:" \ + "(A;;RPWPCRCCDCLCLORCWOWDSDDTSW;;;SY)" \ + "(A;;RPWPCRCCDCLCLORCWOWDSW;;;DA)" \ + "(OA;;CCDC;bf967a86-0de6-11d0-a285-00aa003049e2;;AO)" \ + "(OA;;CCDC;bf967aba-0de6-11d0-a285-00aa003049e2;;AO)" \ + "(OA;;CCDC;bf967a9c-0de6-11d0-a285-00aa003049e2;;AO)" \ + "(OA;;CCDC;bf967aa8-0de6-11d0-a285-00aa003049e2;;PO)" \ + "(A;;RPLCLORC;;;AU)" \ + "(OA;;CCDC;4828cc14-1437-45bc-9b07-ad6f015e5f28;;AO)" \ + "S:" + return sddl2binary(sddl, domain_sid, name_map) + + +def get_domain_users_descriptor(domain_sid, name_map=None): + if name_map is None: + name_map = {} + + sddl = "D:" \ + "(A;;RPWPCRCCDCLCLORCWOWDSDDTSW;;;SY)" \ + "(A;;RPWPCRCCDCLCLORCWOWDSW;;;DA)" \ + "(OA;;CCDC;bf967aba-0de6-11d0-a285-00aa003049e2;;AO)" \ + "(OA;;CCDC;bf967a9c-0de6-11d0-a285-00aa003049e2;;AO)" \ + "(OA;;CCDC;bf967aa8-0de6-11d0-a285-00aa003049e2;;PO)" \ + "(A;;RPLCLORC;;;AU)" \ + "(OA;;CCDC;4828cc14-1437-45bc-9b07-ad6f015e5f28;;AO)" \ + "S:" + return sddl2binary(sddl, domain_sid, name_map) + + +def get_managed_service_accounts_descriptor(domain_sid, name_map=None): + if name_map is None: + name_map = {} + + sddl = "D:" \ + "(A;;RPWPCRCCDCLCLORCWOWDSDDTSW;;;SY)" \ + "(A;;RPWPCRCCDCLCLORCWOWDSW;;;DA)" \ + "(OA;;CCDC;ce206244-5827-4a86-ba1c-1c0c386c1b64;;AO)" \ + "(OA;;CCDC;bf967aba-0de6-11d0-a285-00aa003049e2;;AO)" \ + "(OA;;CCDC;bf967a9c-0de6-11d0-a285-00aa003049e2;;AO)" \ + "(A;;RPLCLORC;;;AU)" \ + "S:" + return sddl2binary(sddl, 
domain_sid, name_map) + + +def get_domain_controllers_descriptor(domain_sid, name_map=None): + if name_map is None: + name_map = {} + + sddl = "D:" \ + "(A;;RPLCLORC;;;AU)" \ + "(A;;RPWPCRCCLCLORCWOWDSW;;;DA)" \ + "(A;;RPWPCRCCDCLCLORCWOWDSDDTSW;;;SY)" \ + "(A;;RPLCLORC;;;ED)" \ + "S:" \ + "(AU;SA;CCDCWOWDSDDT;;;WD)" \ + "(AU;CISA;WP;;;WD)" + return sddl2binary(sddl, domain_sid, name_map) + + +def get_domain_delete_protected1_descriptor(domain_sid, name_map=None): + if name_map is None: + name_map = {} + + sddl = "D:AI" \ + "(A;;RPLCLORC;;;AU)" \ + "(A;;RPWPCRCCLCLORCWOWDSW;;;DA)" \ + "(A;;RPWPCRCCDCLCLORCWOWDSDDTSW;;;SY)" + return sddl2binary(sddl, domain_sid, name_map) + + +def get_domain_delete_protected2_descriptor(domain_sid, name_map=None): + if name_map is None: + name_map = {} + + sddl = "D:AI" \ + "(A;;RPLCLORC;;;AU)" \ + "(A;;RPWPCRCCDCLCLORCWOWDSW;;;DA)" \ + "(A;;RPWPCRCCDCLCLORCWOWDSDDTSW;;;SY)" + return sddl2binary(sddl, domain_sid, name_map) + + +def get_dns_partition_descriptor(domain_sid, name_map=None): + if name_map is None: + name_map = {} + + sddl = "O:SYG:BAD:AI" \ + "(OA;CIIO;RP;4c164200-20c0-11d0-a768-00aa006e0529;4828cc14-1437-45bc-9b07-ad6f015e5f28;RU)" \ + "(OA;CIIO;RP;4c164200-20c0-11d0-a768-00aa006e0529;bf967aba-0de6-11d0-a285-00aa003049e2;RU)" \ + "(OA;CIIO;RP;5f202010-79a5-11d0-9020-00c04fc2d4cf;4828cc14-1437-45bc-9b07-ad6f015e5f28;RU)" \ + "(OA;CIIO;RP;5f202010-79a5-11d0-9020-00c04fc2d4cf;bf967aba-0de6-11d0-a285-00aa003049e2;RU)" \ + "(OA;CIIO;RP;bc0ac240-79a9-11d0-9020-00c04fc2d4cf;4828cc14-1437-45bc-9b07-ad6f015e5f28;RU)" \ + "(OA;CIIO;RP;bc0ac240-79a9-11d0-9020-00c04fc2d4cf;bf967aba-0de6-11d0-a285-00aa003049e2;RU)" \ + "(OA;CIIO;RP;59ba2f42-79a2-11d0-9020-00c04fc2d3cf;4828cc14-1437-45bc-9b07-ad6f015e5f28;RU)" \ + "(OA;CIIO;RP;59ba2f42-79a2-11d0-9020-00c04fc2d3cf;bf967aba-0de6-11d0-a285-00aa003049e2;RU)" \ + "(OA;CIIO;RP;037088f8-0ae1-11d2-b422-00a0c968f939;4828cc14-1437-45bc-9b07-ad6f015e5f28;RU)" \ + "(OA;CIIO;RP;037088f8-0ae1-11d2-b422-00a0c968f939;bf967aba-0de6-11d0-a285-00aa003049e2;RU)" \ + "(OA;;CR;1131f6aa-9c07-11d1-f79f-00c04fc2dcd2;;RO)" \ + "(OA;CIIO;RP;b7c69e6d-2cc7-11d2-854e-00a0c983f608;bf967a86-0de6-11d0-a285-00aa003049e2;ED)" \ + "(OA;CIIO;RP;b7c69e6d-2cc7-11d2-854e-00a0c983f608;bf967a9c-0de6-11d0-a285-00aa003049e2;ED)" \ + "(OA;CIIO;RP;b7c69e6d-2cc7-11d2-854e-00a0c983f608;bf967aba-0de6-11d0-a285-00aa003049e2;ED)" \ + "(OA;;CR;89e95b76-444d-4c62-991a-0facbeda640c;;BA)" \ + "(OA;;CR;1131f6aa-9c07-11d1-f79f-00c04fc2dcd2;;BA)" \ + "(OA;;CR;1131f6ab-9c07-11d1-f79f-00c04fc2dcd2;;BA)" \ + "(OA;;CR;1131f6ac-9c07-11d1-f79f-00c04fc2dcd2;;BA)" \ + "(OA;;CR;1131f6ad-9c07-11d1-f79f-00c04fc2dcd2;;BA)" \ + "(OA;;CR;1131f6ae-9c07-11d1-f79f-00c04fc2dcd2;;BA)" \ + "(OA;;CR;e2a36dc9-ae17-47c3-b58b-be34c55ba633;;S-1-5-32-557)" \ + "(OA;;RP;c7407360-20bf-11d0-a768-00aa006e0529;;RU)" \ + "(OA;;RP;b8119fd0-04f6-4762-ab7a-4986c76b3f9a;;RU)" \ + "(OA;CIIO;RPLCLORC;;4828cc14-1437-45bc-9b07-ad6f015e5f28;RU)" \ + "(OA;CIIO;RPLCLORC;;bf967a9c-0de6-11d0-a285-00aa003049e2;RU)" \ + "(OA;CIIO;RPLCLORC;;bf967aba-0de6-11d0-a285-00aa003049e2;RU)" \ + "(OA;;CR;05c74c5e-4deb-43b4-bd9f-86664c2a7fd5;;AU)" \ + "(OA;;CR;89e95b76-444d-4c62-991a-0facbeda640c;;ED)" \ + "(OA;;CR;ccc2dc7d-a6ad-4a7a-8846-c04e3cc53501;;AU)" \ + "(OA;;CR;280f369c-67c7-438e-ae98-1d46f3c6f541;;AU)" \ + "(OA;;CR;1131f6aa-9c07-11d1-f79f-00c04fc2dcd2;;ED)" \ + "(OA;;CR;1131f6ab-9c07-11d1-f79f-00c04fc2dcd2;;ED)" \ + "(OA;;CR;1131f6ac-9c07-11d1-f79f-00c04fc2dcd2;;ED)" \ + 
"(OA;;CR;1131f6ad-9c07-11d1-f79f-00c04fc2dcd2;;ED)" \ + "(OA;;CR;1131f6ae-9c07-11d1-f79f-00c04fc2dcd2;;ED)" \ + "(OA;;RP;b8119fd0-04f6-4762-ab7a-4986c76b3f9a;;AU)" \ + "(OA;CIIO;RPWPCR;91e647de-d96f-4b70-9557-d63ff4f3ccd8;;PS)" \ + "(OA;CIIO;SW;9b026da6-0d3c-465c-8bee-5199d7165cba;bf967a86-0de6-11d0-a285-00aa003049e2;CO)" \ + "(OA;OICI;RPWP;3f78c3e5-f79a-46bd-a0b8-9d18116ddc79;;PS)" \ + "(OA;CIIO;SW;9b026da6-0d3c-465c-8bee-5199d7165cba;bf967a86-0de6-11d0-a285-00aa003049e2;PS)" \ + "(OA;CIIO;WP;ea1b7b93-5e48-46d5-bc6c-4df4fda78a35;bf967a86-0de6-11d0-a285-00aa003049e2;PS)" \ + "(A;;RPWPCRCCLCLORCWOWDSW;;;DA)" \ + "(A;CI;RPWPCRCCDCLCLORCWOWDSDDTSW;;;EA)" \ + "(A;;RPRC;;;RU)" \ + "(A;CI;LC;;;RU)" \ + "(A;CI;RPWPCRCCLCLORCWOWDSDSW;;;BA)" \ + "(A;;RP;;;WD)" \ + "(A;;RPLCLORC;;;ED)" \ + "(A;;RPLCLORC;;;AU)" \ + "(A;;RPWPCRCCDCLCLORCWOWDSDDTSW;;;SY)" \ + "S:AI" \ + "(OU;CISA;WP;f30e3bbe-9ff0-11d1-b603-0000f80367c1;bf967aa5-0de6-11d0-a285-00aa003049e2;WD)" \ + "(OU;CISA;WP;f30e3bbf-9ff0-11d1-b603-0000f80367c1;bf967aa5-0de6-11d0-a285-00aa003049e2;WD)" \ + "(AU;SA;CR;;;DU)(AU;SA;CR;;;BA)(AU;SA;WPWOWD;;;WD)" + return sddl2binary(sddl, domain_sid, name_map) + + +def get_dns_forest_microsoft_dns_descriptor(domain_sid, name_map=None): + if name_map is None: + name_map = {} + + sddl = "O:SYG:SYD:AI" \ + "(A;;RPWPCRCCDCLCLORCWOWDSDDTSW;;;SY)" \ + "(A;CI;RPWPCRCCDCLCRCWOWDSDDTSW;;;ED)" + return sddl2binary(sddl, domain_sid, name_map) + + +def get_dns_domain_microsoft_dns_descriptor(domain_sid, name_map=None): + if name_map is None: + name_map = {} + + sddl = "O:SYG:SYD:AI" \ + "(A;;RPWPCRCCDCLCLORCWOWDSDDTSW;;;DA)" \ + "(A;CI;RPWPCRCCDCLCRCWOWDSDDTSW;;;DnsAdmins)" \ + "(A;;RPWPCRCCDCLCLORCWOWDSDDTSW;;;SY)" \ + "(A;CI;RPWPCRCCDCLCRCWOWDSDDTSW;;;ED)" + return sddl2binary(sddl, domain_sid, name_map) + + +def get_paritions_crossref_subdomain_descriptor(domain_sid, name_map=None): + if name_map is None: + name_map = {} + + sddl = "O:SubdomainAdminsG:SubdomainAdminsD:AI" \ + "(A;;RPWPCRCCLCLORCWOWDSW;;;SubdomainAdmins)" \ + "(A;;RPLCLORC;;;AU)" \ + "(A;;RPWPCRCCDCLCLORCWOWDSDDTSW;;;SY)" + return sddl2binary(sddl, domain_sid, name_map) + + +def get_wellknown_sds(samdb): + + # Then subcontainers + subcontainers = [ + (ldb.Dn(samdb, "%s" % str(samdb.domain_dn())), get_domain_descriptor), + (ldb.Dn(samdb, "CN=Deleted Objects,%s" % str(samdb.domain_dn())), get_deletedobjects_descriptor), + (ldb.Dn(samdb, "CN=LostAndFound,%s" % str(samdb.domain_dn())), get_domain_delete_protected2_descriptor), + (ldb.Dn(samdb, "CN=System,%s" % str(samdb.domain_dn())), get_domain_delete_protected1_descriptor), + (ldb.Dn(samdb, "CN=Infrastructure,%s" % str(samdb.domain_dn())), get_domain_infrastructure_descriptor), + (ldb.Dn(samdb, "CN=Builtin,%s" % str(samdb.domain_dn())), get_domain_builtin_descriptor), + (ldb.Dn(samdb, "CN=Computers,%s" % str(samdb.domain_dn())), get_domain_computers_descriptor), + (ldb.Dn(samdb, "CN=Users,%s" % str(samdb.domain_dn())), get_domain_users_descriptor), + (ldb.Dn(samdb, "OU=Domain Controllers,%s" % str(samdb.domain_dn())), get_domain_controllers_descriptor), + (ldb.Dn(samdb, "CN=MicrosoftDNS,CN=System,%s" % str(samdb.domain_dn())), get_dns_domain_microsoft_dns_descriptor), + + (ldb.Dn(samdb, "%s" % str(samdb.get_config_basedn())), get_config_descriptor), + (ldb.Dn(samdb, "CN=Deleted Objects,%s" % str(samdb.get_config_basedn())), get_deletedobjects_descriptor), + (ldb.Dn(samdb, "CN=NTDS Quotas,%s" % str(samdb.get_config_basedn())), get_config_ntds_quotas_descriptor), + (ldb.Dn(samdb, 
"CN=LostAndFoundConfig,%s" % str(samdb.get_config_basedn())), get_config_delete_protected1wd_descriptor), + (ldb.Dn(samdb, "CN=Services,%s" % str(samdb.get_config_basedn())), get_config_delete_protected1_descriptor), + (ldb.Dn(samdb, "CN=Physical Locations,%s" % str(samdb.get_config_basedn())), get_config_delete_protected1wd_descriptor), + (ldb.Dn(samdb, "CN=WellKnown Security Principals,%s" % str(samdb.get_config_basedn())), get_config_delete_protected1wd_descriptor), + (ldb.Dn(samdb, "CN=ForestUpdates,%s" % str(samdb.get_config_basedn())), get_config_delete_protected1wd_descriptor), + (ldb.Dn(samdb, "CN=DisplaySpecifiers,%s" % str(samdb.get_config_basedn())), get_config_delete_protected2_descriptor), + (ldb.Dn(samdb, "CN=Extended-Rights,%s" % str(samdb.get_config_basedn())), get_config_delete_protected2_descriptor), + (ldb.Dn(samdb, "CN=Partitions,%s" % str(samdb.get_config_basedn())), get_config_partitions_descriptor), + (ldb.Dn(samdb, "CN=Sites,%s" % str(samdb.get_config_basedn())), get_config_sites_descriptor), + + (ldb.Dn(samdb, "%s" % str(samdb.get_schema_basedn())), get_schema_descriptor), + ] + + current = samdb.search(expression="(objectClass=*)", + base="", scope=ldb.SCOPE_BASE, + attrs=["namingContexts"]) + + for nc in current[0]["namingContexts"]: + + dnsforestdn = ldb.Dn(samdb, "DC=ForestDnsZones,%s" % (str(samdb.get_root_basedn()))) + if ldb.Dn(samdb, nc.decode('utf8')) == dnsforestdn: + c = (ldb.Dn(samdb, "%s" % str(dnsforestdn)), get_dns_partition_descriptor) + subcontainers.append(c) + c = (ldb.Dn(samdb, "CN=Deleted Objects,%s" % str(dnsforestdn)), + get_deletedobjects_descriptor) + subcontainers.append(c) + c = (ldb.Dn(samdb, "CN=Infrastructure,%s" % str(dnsforestdn)), + get_domain_delete_protected1_descriptor) + subcontainers.append(c) + c = (ldb.Dn(samdb, "CN=LostAndFound,%s" % str(dnsforestdn)), + get_domain_delete_protected2_descriptor) + subcontainers.append(c) + c = (ldb.Dn(samdb, "CN=MicrosoftDNS,%s" % str(dnsforestdn)), + get_dns_forest_microsoft_dns_descriptor) + subcontainers.append(c) + continue + + dnsdomaindn = ldb.Dn(samdb, "DC=DomainDnsZones,%s" % (str(samdb.domain_dn()))) + if ldb.Dn(samdb, nc.decode('utf8')) == dnsdomaindn: + c = (ldb.Dn(samdb, "%s" % str(dnsdomaindn)), get_dns_partition_descriptor) + subcontainers.append(c) + c = (ldb.Dn(samdb, "CN=Deleted Objects,%s" % str(dnsdomaindn)), + get_deletedobjects_descriptor) + subcontainers.append(c) + c = (ldb.Dn(samdb, "CN=Infrastructure,%s" % str(dnsdomaindn)), + get_domain_delete_protected1_descriptor) + subcontainers.append(c) + c = (ldb.Dn(samdb, "CN=LostAndFound,%s" % str(dnsdomaindn)), + get_domain_delete_protected2_descriptor) + subcontainers.append(c) + c = (ldb.Dn(samdb, "CN=MicrosoftDNS,%s" % str(dnsdomaindn)), + get_dns_domain_microsoft_dns_descriptor) + subcontainers.append(c) + + return subcontainers + + +def chunck_acl(acl): + """Return separate ACE of an ACL + + :param acl: A string representing the ACL + :return: A hash with different parts + """ + + p = re.compile(r'(\w+)?(\(.*?\))') + tab = p.findall(acl) + + hash = {} + hash["aces"] = [] + for e in tab: + if len(e[0]) > 0: + hash["flags"] = e[0] + hash["aces"].append(e[1]) + + return hash + + +def chunck_sddl(sddl): + """ Return separate parts of the SDDL (owner, group, ...) 
+ + :param sddl: An string containing the SDDL to chunk + :return: A hash with the different chunk + """ + + p = re.compile(r'([OGDS]:)(.*?)(?=(?:[GDS]:|$))') + tab = p.findall(sddl) + + hash = {} + for e in tab: + if e[0] == "O:": + hash["owner"] = e[1] + if e[0] == "G:": + hash["group"] = e[1] + if e[0] == "D:": + hash["dacl"] = e[1] + if e[0] == "S:": + hash["sacl"] = e[1] + + return hash + + +def get_clean_sd(sd): + """Get the SD without any inherited ACEs + + :param sd: SD to strip + :return: An SD with inherited ACEs stripped + """ + + sd_clean = security.descriptor() + sd_clean.owner_sid = sd.owner_sid + sd_clean.group_sid = sd.group_sid + sd_clean.type = sd.type + sd_clean.revision = sd.revision + + aces = [] + if sd.sacl is not None: + aces = sd.sacl.aces + for i in range(0, len(aces)): + ace = aces[i] + + if not ace.flags & security.SEC_ACE_FLAG_INHERITED_ACE: + sd_clean.sacl_add(ace) + continue + + aces = [] + if sd.dacl is not None: + aces = sd.dacl.aces + for i in range(0, len(aces)): + ace = aces[i] + + if not ace.flags & security.SEC_ACE_FLAG_INHERITED_ACE: + sd_clean.dacl_add(ace) + continue + return sd_clean + + +def get_diff_sds(refsd, cursd, domainsid, checkSacl=True, + ignoreAdditionalACEs=False): + """Get the difference between 2 sd + + This function split the textual representation of ACL into smaller + chunk in order to not to report a simple permutation as a difference + + :param refsddl: First sddl to compare + :param cursddl: Second sddl to compare + :param checkSacl: If false we skip the sacl checks + :return: A string that explain difference between sddls + """ + + cursddl = get_clean_sd(cursd).as_sddl(domainsid) + refsddl = get_clean_sd(refsd).as_sddl(domainsid) + + txt = "" + hash_cur = chunck_sddl(cursddl) + hash_ref = chunck_sddl(refsddl) + + if "owner" not in hash_cur: + txt = "\tNo owner in current SD" + elif "owner" in hash_ref and hash_cur["owner"] != hash_ref["owner"]: + txt = "\tOwner mismatch: %s (in ref) %s" \ + "(in current)\n" % (hash_ref["owner"], hash_cur["owner"]) + + if "group" not in hash_cur: + txt = "%s\tNo group in current SD" % txt + elif "group" in hash_ref and hash_cur["group"] != hash_ref["group"]: + txt = "%s\tGroup mismatch: %s (in ref) %s" \ + "(in current)\n" % (txt, hash_ref["group"], hash_cur["group"]) + + parts = ["dacl"] + if checkSacl: + parts.append("sacl") + for part in parts: + if part in hash_cur and part in hash_ref: + + # both are present, check if they contain the same ACE + h_cur = set() + h_ref = set() + c_cur = chunck_acl(hash_cur[part]) + c_ref = chunck_acl(hash_ref[part]) + + for elem in c_cur["aces"]: + h_cur.add(elem) + + for elem in c_ref["aces"]: + h_ref.add(elem) + + for k in set(h_ref): + if k in h_cur: + h_cur.remove(k) + h_ref.remove(k) + + if len(h_cur) + len(h_ref) > 0: + if txt == "" and len(h_ref) == 0: + if ignoreAdditionalACEs: + return "" + + txt = "%s\tPart %s is different between reference" \ + " and current here is the detail:\n" % (txt, part) + + for item in h_cur: + txt = "%s\t\t%s ACE is not present in the" \ + " reference\n" % (txt, item) + + for item in h_ref: + txt = "%s\t\t%s ACE is not present in the" \ + " current\n" % (txt, item) + + elif part in hash_cur and part not in hash_ref: + txt = "%s\tReference ACL hasn't a %s part\n" % (txt, part) + elif part not in hash_cur and part in hash_ref: + txt = "%s\tCurrent ACL hasn't a %s part\n" % (txt, part) + + return txt diff --git a/python/samba/dnsresolver.py b/python/samba/dnsresolver.py new file mode 100644 index 0000000..a627555 --- /dev/null 
+++ b/python/samba/dnsresolver.py
@@ -0,0 +1,68 @@
+# Samba wrapper for DNS resolvers
+#
+# Copyright (C) Stanislav Levin
+# Copyright (C) Alexander Bokovoy
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+#
+
+import dns.resolver
+import dns.rdatatype
+import dns.reversename
+
+class DNSResolver(dns.resolver.Resolver):
+    """DNS stub resolver compatible with both dnspython < 2.0.0
+    and dnspython >= 2.0.0.
+
+    Set `use_search_by_default` attribute to `True`, which
+    determines the default for whether the search list configured
+    in the system's resolver configuration is used for relative
+    names, and whether the resolver's domain may be added to relative
+    names.
+
+    Increase the default lifetime which determines the number of seconds
+    to spend trying to get an answer to the question. dnspython 2.0.0
+    changes this to 5sec, while the previous one was 30sec.
+    """
+    def __init__(self, *args, **kwargs):
+        super().__init__(*args, **kwargs)
+        self.reset_defaults()
+        self.resolve = getattr(super(), "resolve", self.query)
+        self.resolve_address = getattr(
+            super(),
+            "resolve_address",
+            self._resolve_address
+        )
+
+    def reset_defaults(self):
+        self.use_search_by_default = True
+        # the default is 5sec
+        self.lifetime = 15
+
+    def reset(self):
+        super().reset()
+        self.reset_defaults()
+
+    def _resolve_address(self, ip_address, *args, **kwargs):
+        """Query nameservers for PTR records.
+
+        :param ip_address: IPv4 or IPv6 address
+        :type ip_address: str
+        """
+        return self.resolve(
+            dns.reversename.from_address(ip_address),
+            rdtype=dns.rdatatype.PTR,
+            *args,
+            **kwargs,
+        )
diff --git a/python/samba/dnsserver.py b/python/samba/dnsserver.py
new file mode 100644
index 0000000..d907f8e
--- /dev/null
+++ b/python/samba/dnsserver.py
@@ -0,0 +1,405 @@
+# helper for DNS management tool
+#
+# Copyright (C) Amitay Isaacs 2011-2012
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+#
+
+import shlex
+import socket
+from samba.dcerpc import dnsserver, dnsp
+from samba import WERRORError, werror
+
+# Note: these are not quite the same as similar looking classes in
+# provision/sambadns.py -- those ones are based on
+# dnsp.DnssrvRpcRecord, these are based on dnsserver.DNS_RPC_RECORD.
+# They encode the same information in slightly different ways.
+#
+# DNS_RPC_RECORD structures ([MS-DNSP] 2.2.2.2.5 "DNS_RPC_RECORD") are
+# used on the wire by DnssrvEnumRecords2. The dnsp.DnssrvRpcRecord
+# versions have the in-database version of the same information, where
+# the flags field is unpacked, and the struct ordering is different.
+# See [MS-DNSP] 2.3.2.2 "DnsRecord".
+#
+# In both cases the structure and contents of .data depend on .wType.
+# For example, if .wType is DNS_TYPE_A, .data is an IPv4 address. If
+# the .wType is changed to DNS_TYPE_CNAME, the contents of .data will
+# be interpreted as a cname blob, but the bytes there will still be
+# those of the IPv4 address. If you don't also set the .data you may
+# encounter stability problems. These DNS_RPC_RECORD subclasses
+# attempt to hide that from you, but are only pretending -- any of
+# them can represent any type of record.
+
+
+class DNSParseError(ValueError):
+    pass
+
+
+class ARecord(dnsserver.DNS_RPC_RECORD):
+    def __init__(self, ip_addr, serial=1, ttl=900, rank=dnsp.DNS_RANK_ZONE,
+                 node_flag=0):
+        super().__init__()
+        self.wType = dnsp.DNS_TYPE_A
+        self.dwFlags = rank | node_flag
+        self.dwSerial = serial
+        self.dwTtlSeconds = ttl
+        self.data = ip_addr
+
+    @classmethod
+    def from_string(cls, data, sep=None, **kwargs):
+        return cls(data, **kwargs)
+
+
+class AAAARecord(dnsserver.DNS_RPC_RECORD):
+
+    def __init__(self, ip6_addr, serial=1, ttl=900, rank=dnsp.DNS_RANK_ZONE,
+                 node_flag=0):
+        super().__init__()
+        self.wType = dnsp.DNS_TYPE_AAAA
+        self.dwFlags = rank | node_flag
+        self.dwSerial = serial
+        self.dwTtlSeconds = ttl
+        self.data = ip6_addr
+
+    @classmethod
+    def from_string(cls, data, sep=None, **kwargs):
+        return cls(data, **kwargs)
+
+
+class PTRRecord(dnsserver.DNS_RPC_RECORD):
+
+    def __init__(self, ptr, serial=1, ttl=900, rank=dnsp.DNS_RANK_ZONE,
+                 node_flag=0):
+        super().__init__()
+        self.wType = dnsp.DNS_TYPE_PTR
+        self.dwFlags = rank | node_flag
+        self.dwSerial = serial
+        self.dwTtlSeconds = ttl
+        ptr_name = dnsserver.DNS_RPC_NAME()
+        ptr_name.str = ptr
+        ptr_name.len = len(ptr)
+        self.data = ptr_name
+
+    @classmethod
+    def from_string(cls, data, sep=None, **kwargs):
+        return cls(data, **kwargs)
+
+
+class CNAMERecord(dnsserver.DNS_RPC_RECORD):
+
+    def __init__(self, cname, serial=1, ttl=900, rank=dnsp.DNS_RANK_ZONE,
+                 node_flag=0):
+        super().__init__()
+        self.wType = dnsp.DNS_TYPE_CNAME
+        self.dwFlags = rank | node_flag
+        self.dwSerial = serial
+        self.dwTtlSeconds = ttl
+        cname_name = dnsserver.DNS_RPC_NAME()
+        cname_name.str = cname
+        cname_name.len = len(cname)
+        self.data = cname_name
+
+    @classmethod
+    def from_string(cls, data, sep=None, **kwargs):
+        return cls(data, **kwargs)
+
+
+class NSRecord(dnsserver.DNS_RPC_RECORD):
+
+    def __init__(self, dns_server, serial=1, ttl=900, rank=dnsp.DNS_RANK_ZONE,
+                 node_flag=0):
+        super().__init__()
+        self.wType = dnsp.DNS_TYPE_NS
+        self.dwFlags = rank | node_flag
+        self.dwSerial = serial
+        self.dwTtlSeconds = ttl
+        ns = dnsserver.DNS_RPC_NAME()
+        ns.str = dns_server
+        ns.len = len(dns_server)
+        self.data = ns
+
+    @classmethod
+    def from_string(cls, data, sep=None, **kwargs):
+        return cls(data, **kwargs)
+
+
+class MXRecord(dnsserver.DNS_RPC_RECORD):
+
+    def __init__(self, mail_server, preference, serial=1, ttl=900,
+                 rank=dnsp.DNS_RANK_ZONE, node_flag=0):
+        super().__init__()
+        self.wType = dnsp.DNS_TYPE_MX
+        self.dwFlags = rank | node_flag
+        self.dwSerial = serial
+        self.dwTtlSeconds = ttl
+        mx = dnsserver.DNS_RPC_RECORD_NAME_PREFERENCE()
+        mx.wPreference = preference
+        mx.nameExchange.str = mail_server
+        mx.nameExchange.len = len(mail_server)
+        self.data = mx
+
+    @classmethod
+    def from_string(cls, data, sep=None, **kwargs):
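+        # A hedged usage note, not upstream: the MX value string is
+        # "<exchange> <preference>", so for example
+        #     MXRecord.from_string("mail.example.com 10", ttl=3600)
+        # builds the same record as MXRecord("mail.example.com", 10, ttl=3600);
+        # the hostname and preference here are purely illustrative.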
+ try: + server, priority = data.split(sep) + priority = int(priority) + except ValueError as e: + raise DNSParseError("MX data must have server and priority " + "(space separated), not %r" % data) from e + return cls(server, priority, **kwargs) + + +class SOARecord(dnsserver.DNS_RPC_RECORD): + + def __init__(self, mname, rname, serial=1, refresh=900, retry=600, + expire=86400, minimum=3600, ttl=3600, rank=dnsp.DNS_RANK_ZONE, + node_flag=dnsp.DNS_RPC_FLAG_AUTH_ZONE_ROOT): + super().__init__() + self.wType = dnsp.DNS_TYPE_SOA + self.dwFlags = rank | node_flag + self.dwSerial = serial + self.dwTtlSeconds = ttl + soa = dnsserver.DNS_RPC_RECORD_SOA() + soa.dwSerialNo = serial + soa.dwRefresh = refresh + soa.dwRetry = retry + soa.dwExpire = expire + soa.dwMinimumTtl = minimum + soa.NamePrimaryServer.str = mname + soa.NamePrimaryServer.len = len(mname) + soa.ZoneAdministratorEmail.str = rname + soa.ZoneAdministratorEmail.len = len(rname) + self.data = soa + + @classmethod + def from_string(cls, data, sep=None, **kwargs): + args = data.split(sep) + if len(args) != 7: + raise DNSParseError('Data requires 7 space separated elements - ' + 'nameserver, email, serial, ' + 'refresh, retry, expire, minimumttl') + try: + for i in range(2, 7): + args[i] = int(args[i]) + except ValueError as e: + raise DNSParseError("SOA serial, refresh, retry, expire, minimumttl' " + "should be integers") from e + return cls(*args, **kwargs) + + +class SRVRecord(dnsserver.DNS_RPC_RECORD): + + def __init__(self, target, port, priority=0, weight=100, serial=1, ttl=900, + rank=dnsp.DNS_RANK_ZONE, node_flag=0): + super().__init__() + self.wType = dnsp.DNS_TYPE_SRV + self.dwFlags = rank | node_flag + self.dwSerial = serial + self.dwTtlSeconds = ttl + srv = dnsserver.DNS_RPC_RECORD_SRV() + srv.wPriority = priority + srv.wWeight = weight + srv.wPort = port + srv.nameTarget.str = target + srv.nameTarget.len = len(target) + self.data = srv + + @classmethod + def from_string(cls, data, sep=None, **kwargs): + try: + target, port, priority, weight = data.split(sep) + except ValueError as e: + raise DNSParseError("SRV data must have four space " + "separated elements: " + "server, port, priority, weight; " + "not %r" % data) from e + try: + args = (target, int(port), int(priority), int(weight)) + except ValueError as e: + raise DNSParseError("SRV port, priority, and weight " + "must be integers") from e + + return cls(*args, **kwargs) + + +class TXTRecord(dnsserver.DNS_RPC_RECORD): + + def __init__(self, slist, serial=1, ttl=900, rank=dnsp.DNS_RANK_ZONE, + node_flag=0): + super().__init__() + self.wType = dnsp.DNS_TYPE_TXT + self.dwFlags = rank | node_flag + self.dwSerial = serial + self.dwTtlSeconds = ttl + if isinstance(slist, str): + slist = [slist] + names = [] + for s in slist: + name = dnsserver.DNS_RPC_NAME() + name.str = s + name.len = len(s) + names.append(name) + txt = dnsserver.DNS_RPC_RECORD_STRING() + txt.count = len(slist) + txt.str = names + self.data = txt + + @classmethod + def from_string(cls, data, sep=None, **kwargs): + slist = shlex.split(data) + return cls(slist, **kwargs) + + +# +# Don't add new Record types after this line + +_RECORD_TYPE_LUT = {} +def _setup_record_type_lut(): + for k, v in globals().items(): + if k[-6:] == 'Record': + k = k[:-6] + flag = getattr(dnsp, 'DNS_TYPE_' + k) + _RECORD_TYPE_LUT[k] = v + _RECORD_TYPE_LUT[flag] = v + +_setup_record_type_lut() +del _setup_record_type_lut + + +def record_from_string(t, data, sep=None, **kwargs): + """Get a DNS record of type t based on the data string. 
+ Additional keywords (ttl, rank, etc) can be passed in. + + t can be a dnsp.DNS_TYPE_* integer or a string like "A", "TXT", etc. + """ + if isinstance(t, str): + t = t.upper() + try: + Record = _RECORD_TYPE_LUT[t] + except KeyError as e: + raise DNSParseError("Unsupported record type") from e + + return Record.from_string(data, sep=sep, **kwargs) + + +def flag_from_string(rec_type): + rtype = rec_type.upper() + try: + return getattr(dnsp, 'DNS_TYPE_' + rtype) + except AttributeError as e: + raise DNSParseError('Unknown type of DNS record %s' % rec_type) from e + + +def recbuf_from_string(*args, **kwargs): + rec = record_from_string(*args, **kwargs) + buf = dnsserver.DNS_RPC_RECORD_BUF() + buf.rec = rec + return buf + + +def dns_name_equal(n1, n2): + """Match dns name (of type DNS_RPC_NAME)""" + return n1.str.rstrip('.').lower() == n2.str.rstrip('.').lower() + + +def ipv6_normalise(addr): + """Convert an AAAA address into a canonical form.""" + packed = socket.inet_pton(socket.AF_INET6, addr) + return socket.inet_ntop(socket.AF_INET6, packed) + + +def dns_record_match(dns_conn, server, zone, name, record_type, data): + """Find a dns record that matches the specified data""" + + # The matching is not as precises as that offered by + # dsdb_dns.match_record, which, for example, compares IPv6 records + # semantically rather than as strings. However that function + # compares database DnssrvRpcRecord structures, not wire + # DNS_RPC_RECORD structures. + # + # While it would be possible, perhaps desirable, to wrap that + # function for use in samba-tool, there is value in having a + # separate implementation for tests, to avoid the circularity of + # asserting the function matches itself. + + urec = record_from_string(record_type, data) + + select_flags = dnsserver.DNS_RPC_VIEW_AUTHORITY_DATA + + try: + buflen, res = dns_conn.DnssrvEnumRecords2( + dnsserver.DNS_CLIENT_VERSION_LONGHORN, 0, server, zone, name, None, + record_type, select_flags, None, None) + except WERRORError as e: + if e.args[0] == werror.WERR_DNS_ERROR_NAME_DOES_NOT_EXIST: + # Either the zone doesn't exist, or there were no records. + # We can't differentiate the two. 
+            return None
+        raise e
+
+    if not res or res.count == 0:
+        return None
+
+    for rec in res.rec[0].records:
+        if rec.wType != record_type:
+            continue
+
+        found = False
+        if record_type == dnsp.DNS_TYPE_A:
+            if rec.data == urec.data:
+                found = True
+        elif record_type == dnsp.DNS_TYPE_AAAA:
+            if ipv6_normalise(rec.data) == ipv6_normalise(urec.data):
+                found = True
+        elif record_type == dnsp.DNS_TYPE_PTR:
+            if dns_name_equal(rec.data, urec.data):
+                found = True
+        elif record_type == dnsp.DNS_TYPE_CNAME:
+            if dns_name_equal(rec.data, urec.data):
+                found = True
+        elif record_type == dnsp.DNS_TYPE_NS:
+            if dns_name_equal(rec.data, urec.data):
+                found = True
+        elif record_type == dnsp.DNS_TYPE_MX:
+            if dns_name_equal(rec.data.nameExchange, urec.data.nameExchange) and \
+               rec.data.wPreference == urec.data.wPreference:
+                found = True
+        elif record_type == dnsp.DNS_TYPE_SRV:
+            if rec.data.wPriority == urec.data.wPriority and \
+               rec.data.wWeight == urec.data.wWeight and \
+               rec.data.wPort == urec.data.wPort and \
+               dns_name_equal(rec.data.nameTarget, urec.data.nameTarget):
+                found = True
+        elif record_type == dnsp.DNS_TYPE_SOA:
+            if rec.data.dwSerialNo == urec.data.dwSerialNo and \
+               rec.data.dwRefresh == urec.data.dwRefresh and \
+               rec.data.dwRetry == urec.data.dwRetry and \
+               rec.data.dwExpire == urec.data.dwExpire and \
+               rec.data.dwMinimumTtl == urec.data.dwMinimumTtl and \
+               dns_name_equal(rec.data.NamePrimaryServer,
+                              urec.data.NamePrimaryServer) and \
+               dns_name_equal(rec.data.ZoneAdministratorEmail,
+                              urec.data.ZoneAdministratorEmail):
+                found = True
+        elif record_type == dnsp.DNS_TYPE_TXT:
+            if rec.data.count == urec.data.count:
+                found = True
+                for i in range(rec.data.count):
+                    found = found and \
+                        (rec.data.str[i].str == urec.data.str[i].str)
+
+        if found:
+            return rec
+
+    return None
diff --git a/python/samba/domain_update.py b/python/samba/domain_update.py
new file mode 100644
index 0000000..e91bdf4
--- /dev/null
+++ b/python/samba/domain_update.py
@@ -0,0 +1,573 @@
+# Samba4 Domain update checker
+#
+# Copyright (C) Andrew Bartlett 2017
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+# + +import ldb +from base64 import b64encode +from samba import sd_utils +from samba.dcerpc import security +from samba.descriptor import ( + get_managed_service_accounts_descriptor, +) +from samba.dsdb import ( + DS_DOMAIN_FUNCTION_2008, + DS_DOMAIN_FUNCTION_2008_R2, + DS_DOMAIN_FUNCTION_2012, + DS_DOMAIN_FUNCTION_2012_R2, + DS_DOMAIN_FUNCTION_2016, +) + +MIN_UPDATE = 75 +MAX_UPDATE = 89 + +update_map = { + # Missing updates from 2008 R2 - version 5 + 75: "5e1574f6-55df-493e-a671-aaeffca6a100", + 76: "d262aae8-41f7-48ed-9f35-56bbb677573d", + 77: "82112ba0-7e4c-4a44-89d9-d46c9612bf91", + # Windows Server 2012 - version 9 + 78: "c3c927a6-cc1d-47c0-966b-be8f9b63d991", + 79: "54afcfb9-637a-4251-9f47-4d50e7021211", + 80: "f4728883-84dd-483c-9897-274f2ebcf11e", + 81: "ff4f9d27-7157-4cb0-80a9-5d6f2b14c8ff", + # Windows Server 2012 R2 - version 10 + # No updates + # Windows Server 2016 - version 15 + 82: "83c53da7-427e-47a4-a07a-a324598b88f7", + # from the documentation and a fresh installation + # 83 is this: + # c81fc9cc-0130-4fd1-b272-634d74818133 + # adprep will use this on the wire: + # c81fc9cc-0130-f4d1-b272-634d74818133 + 83: "c81fc9cc-0130-4fd1-b272-634d74818133", + 84: "e5f9e791-d96d-4fc9-93c9-d53e1dc439ba", + 85: "e6d5fd00-385d-4e65-b02d-9da3493ed850", + 86: "3a6b3fbf-3168-4312-a10d-dd5b3393952d", + 87: "7f950403-0ab3-47f9-9730-5d7b0269f9bd", + 88: "434bb40d-dbc9-4fe7-81d4-d57229f7b080", + # Windows Server 2016 - version 16 + 89: "a0c238ba-9e30-4ee6-80a6-43f731e9a5cd", +} + + +functional_level_to_max_update = { + DS_DOMAIN_FUNCTION_2008: 74, + DS_DOMAIN_FUNCTION_2008_R2: 77, + DS_DOMAIN_FUNCTION_2012: 81, + DS_DOMAIN_FUNCTION_2012_R2: 81, + DS_DOMAIN_FUNCTION_2016: 89, +} + +functional_level_to_version = { + DS_DOMAIN_FUNCTION_2008: 3, + DS_DOMAIN_FUNCTION_2008_R2: 5, + DS_DOMAIN_FUNCTION_2012: 9, + DS_DOMAIN_FUNCTION_2012_R2: 10, + DS_DOMAIN_FUNCTION_2016: 16, +} + +# No update numbers have been skipped over +missing_updates = [] + + +class DomainUpdateException(Exception): + pass + + +class DomainUpdate(object): + """Check and update a SAM database for domain updates""" + + def __init__(self, samdb, fix=False, + add_update_container=True): + """ + :param samdb: LDB database + :param fix: Apply the update if the container is missing + :param add_update_container: Add the container at the end of the change + :raise DomainUpdateException: + """ + self.samdb = samdb + self.fix = fix + self.add_update_container = add_update_container + # TODO: In future we should check for inconsistencies when it claims it has been done + self.check_update_applied = False + + self.config_dn = self.samdb.get_config_basedn() + self.domain_dn = self.samdb.domain_dn() + self.schema_dn = self.samdb.get_schema_basedn() + + self.sd_utils = sd_utils.SDUtils(samdb) + self.domain_sid = security.dom_sid(samdb.get_domain_sid()) + + self.domainupdate_container = self.samdb.get_root_basedn() + try: + self.domainupdate_container.add_child("CN=Operations,CN=DomainUpdates,CN=System") + except ldb.LdbError: + raise DomainUpdateException("Failed to add domain update container child") + + self.revision_object = self.samdb.get_root_basedn() + try: + self.revision_object.add_child("CN=ActiveDirectoryUpdate,CN=DomainUpdates,CN=System") + except ldb.LdbError: + raise DomainUpdateException("Failed to add revision object child") + + def check_updates_functional_level(self, functional_level, + old_functional_level=None, + update_revision=False): + """ + Apply all updates for a given old and new functional level + :param 
functional_level: constant + :param old_functional_level: constant + :param update_revision: modify the stored version + :raise DomainUpdateException: + """ + res = self.samdb.search(base=self.revision_object, + attrs=["revision"], scope=ldb.SCOPE_BASE) + + expected_update = functional_level_to_max_update[functional_level] + + if old_functional_level: + min_update = functional_level_to_max_update[old_functional_level] + min_update += 1 + else: + min_update = MIN_UPDATE + + self.check_updates_range(min_update, expected_update) + + expected_version = functional_level_to_version[functional_level] + found_version = int(res[0]['revision'][0]) + if update_revision and found_version < expected_version: + if not self.fix: + raise DomainUpdateException("Revision is not high enough. Fix is set to False." + "\nExpected: %dGot: %d" % (expected_version, + found_version)) + self.samdb.modify_ldif("""dn: %s +changetype: modify +replace: revision +revision: %d +""" % (str(self.revision_object), expected_version)) + + def check_updates_iterator(self, iterator): + """ + Apply a list of updates which must be within the valid range of updates + :param iterator: Iterable specifying integer update numbers to apply + :raise DomainUpdateException: + """ + for op in iterator: + if op < MIN_UPDATE or op > MAX_UPDATE: + raise DomainUpdateException("Update number invalid.") + + # No LDIF file exists for the change + getattr(self, "operation_%d" % op)(op) + + def check_updates_range(self, start=0, end=0): + """ + Apply a range of updates which must be within the valid range of updates + :param start: integer update to begin + :param end: integer update to end (inclusive) + :raise DomainUpdateException: + """ + op = start + if start < MIN_UPDATE or start > end or end > MAX_UPDATE: + raise DomainUpdateException("Update number invalid.") + while op <= end: + if op not in missing_updates: + # No LDIF file exists for the change + getattr(self, "operation_%d" % op)(op) + + op += 1 + + def update_exists(self, op): + """ + :param op: Integer update number + :return: True if update exists else False + """ + update_dn = "CN=%s,%s" % (update_map[op], self.domainupdate_container) + try: + res = self.samdb.search(base=update_dn, + scope=ldb.SCOPE_BASE, + attrs=[]) + except ldb.LdbError as e: + (num, msg) = e.args + if num != ldb.ERR_NO_SUCH_OBJECT: + raise + return False + + assert len(res) == 1 + print("Skip Domain Update %u: %s" % (op, update_map[op])) + return True + + def update_add(self, op): + """ + Add the corresponding container object for the given update + :param op: Integer update + """ + self.samdb.add_ldif("""dn: CN=%s,%s +objectClass: container +""" % (update_map[op], str(self.domainupdate_container))) + print("Applied Domain Update %u: %s" % (op, update_map[op])) + + def raise_if_not_fix(self, op): + """ + Raises an exception if not set to fix. + :param op: Integer operation + :raise DomainUpdateException: + """ + if not self.fix: + raise DomainUpdateException("Missing operation %d. Fix is currently set to False" % op) + + # Create a new object CN=TPM Devices in the Domain partition. + def operation_78(self, op): + if self.update_exists(op): + return + self.raise_if_not_fix(op) + + self.samdb.add_ldif("""dn: CN=TPM Devices,%s +objectClass: top +objectClass: msTPM-InformationObjectsContainer +""" % self.domain_dn, + controls=["relax:0", "provision:0"]) + + if self.add_update_container: + self.update_add(op) + + # Created an access control entry for the TPM service. 
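+    # (Annotation, not upstream: an SDDL object ACE reads
+    #  (type;flags;rights;object-guid;inherit-object-guid;trustee).  The ACE
+    #  below is an object-allow entry (OA), inherit-only onto child objects
+    #  (CIIO), granting write-property (WP) on a single attribute to
+    #  Principal Self (PS), scoped to computer objects
+    #  (bf967a86-0de6-11d0-a285-00aa003049e2).)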
+ def operation_79(self, op): + if self.update_exists(op): + return + self.raise_if_not_fix(op) + + ace = "(OA;CIIO;WP;ea1b7b93-5e48-46d5-bc6c-4df4fda78a35;bf967a86-0de6-11d0-a285-00aa003049e2;PS)" + + self.sd_utils.update_aces_in_dacl(self.domain_dn, add_aces=[ace]) + + if self.add_update_container: + self.update_add(op) + + # Grant "Clone DC" extended right to Cloneable Domain Controllers group + def operation_80(self, op): + if self.update_exists(op): + return + self.raise_if_not_fix(op) + + ace = "(OA;;CR;3e0f7e18-2c7a-4c10-ba82-4d926db99a3e;;CN)" + + self.sd_utils.update_aces_in_dacl(self.domain_dn, add_aces=[ace]) + + if self.add_update_container: + self.update_add(op) + + # Grant ms-DS-Allowed-To-Act-On-Behalf-Of-Other-Identity to Principal Self + # on all objects + def operation_81(self, op): + if self.update_exists(op): + return + self.raise_if_not_fix(op) + + ace = "(OA;CIOI;RPWP;3f78c3e5-f79a-46bd-a0b8-9d18116ddc79;;PS)" + + self.sd_utils.update_aces_in_dacl(self.domain_dn, add_aces=[ace]) + + if self.add_update_container: + self.update_add(op) + + # + # THE FOLLOWING ARE MISSING UPDATES FROM 2008 R2 + # + + # Add Managed Service Accounts container + def operation_75(self, op): + if self.update_exists(op): + return + self.raise_if_not_fix(op) + + descriptor = get_managed_service_accounts_descriptor(self.domain_sid) + managedservice_descr = b64encode(descriptor).decode('utf8') + managed_service_dn = "CN=Managed Service Accounts,%s" % \ + str(self.domain_dn) + + self.samdb.modify_ldif("""dn: %s +changetype: add +objectClass: container +description: Default container for managed service accounts +showInAdvancedViewOnly: FALSE +nTSecurityDescriptor:: %s""" % (managed_service_dn, managedservice_descr), + controls=["relax:0", "provision:0"]) + + if self.add_update_container: + self.update_add(op) + + # Add the otherWellKnownObjects reference to MSA + def operation_76(self, op): + if self.update_exists(op): + return + self.raise_if_not_fix(op) + + managed_service_dn = "CN=Managed Service Accounts,%s" % \ + str(self.domain_dn) + + self.samdb.modify_ldif("""dn: %s +changetype: modify +add: otherWellKnownObjects +otherWellKnownObjects: B:32:1EB93889E40C45DF9F0C64D23BBB6237:%s +""" % (str(self.domain_dn), managed_service_dn), controls=["relax:0", + "provision:0"]) + + if self.add_update_container: + self.update_add(op) + + # Add the PSPs object in the System container + def operation_77(self, op): + if self.update_exists(op): + return + self.raise_if_not_fix(op) + + self.samdb.add_ldif("""dn: CN=PSPs,CN=System,%s +objectClass: top +objectClass: msImaging-PSPs +""" % str(self.domain_dn), controls=["relax:0", "provision:0"]) + + if self.add_update_container: + self.update_add(op) + + ## ## Windows Server 2016: Domain-wide updates + ## + ## After the operations that are performed by domainprep in Windows + ## Server 2016 (operations 82-88) complete, the revision attribute for the + ## CN=ActiveDirectoryUpdate,CN=DomainUpdates,CN=System,DC=ForestRootDomain + ## object is set to 15. 
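+
+    # A hedged sketch, not upstream code: check_updates_range() reaches the
+    # operation_NN methods below through getattr() dispatch, so applying just
+    # the Windows Server 2016 domain-wide updates looks roughly like:
+    #
+    #     du = DomainUpdate(samdb, fix=True)  # samdb: a connected SamDB
+    #     du.check_updates_range(82, 88)      # runs operation_82..operation_88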
+ + ## Operation 82: {83c53da7-427e-47a4-a07a-a324598b88f7} + ## + ## Create CN=Keys container at root of domain + ## + ## - objectClass: container + ## - description: Default container for key credential objects + ## - ShowInAdvancedViewOnly: TRUE + ## + ## (A;CI;RPWPCRLCLOCCDCRCWDWOSDDTSW;;;EA) + ## (A;CI;RPWPCRLCLOCCDCRCWDWOSDDTSW;;;DA) + ## (A;CI;RPWPCRLCLOCCDCRCWDWOSDDTSW;;;SY) + ## (A;CI;RPWPCRLCLOCCDCRCWDWOSDDTSW;;;DD) + ## (A;CI;RPWPCRLCLOCCDCRCWDWOSDDTSW;;;ED) + ## + def operation_82(self, op): + if self.update_exists(op): + return + self.raise_if_not_fix(op) + + keys_dn = "CN=Keys,%s" % str(self.domain_dn) + + sddl = "O:DA" + sddl += "D:" + sddl += "(A;CI;RPWPCRLCLOCCDCRCWDWOSDDTSW;;;EA)" + sddl += "(A;CI;RPWPCRLCLOCCDCRCWDWOSDDTSW;;;DA)" + sddl += "(A;CI;RPWPCRLCLOCCDCRCWDWOSDDTSW;;;SY)" + sddl += "(A;CI;RPWPCRLCLOCCDCRCWDWOSDDTSW;;;DD)" + sddl += "(A;CI;RPWPCRLCLOCCDCRCWDWOSDDTSW;;;ED)" + + ldif = """ +dn: %s +objectClass: container +description: Default container for key credential objects +ShowInAdvancedViewOnly: TRUE +nTSecurityDescriptor: %s +""" % (keys_dn, sddl) + + self.samdb.add_ldif(ldif) + + if self.add_update_container: + self.update_add(op) + + ## Operation 83: {c81fc9cc-0130-4fd1-b272-634d74818133} + ## + ## Add Full Control allow aces to CN=Keys container for "domain\Key Admins" + ## and "rootdomain\Enterprise Key Admins". + ## + ## (A;CI;RPWPCRLCLOCCDCRCWDWOSDDTSW;;;Key Admins) + ## (A;CI;RPWPCRLCLOCCDCRCWDWOSDDTSW;;;Enterprise Key Admins) + ## + def operation_83(self, op): + if self.update_exists(op): + return + self.raise_if_not_fix(op) + + keys_dn = "CN=Keys,%s" % str(self.domain_dn) + + aces = ["(A;CI;RPWPCRLCLOCCDCRCWDWOSDDTSW;;;KA)"] + aces += ["(A;CI;RPWPCRLCLOCCDCRCWDWOSDDTSW;;;EK)"] + + self.sd_utils.update_aces_in_dacl(keys_dn, add_aces=aces) + + if self.add_update_container: + self.update_add(op) + + + ## Operation 84: {e5f9e791-d96d-4fc9-93c9-d53e1dc439ba} + ## + ## Modify otherWellKnownObjects attribute to point to the CN=Keys container. + ## + ## - otherWellKnownObjects: B:32:683A24E2E8164BD3AF86AC3C2CF3F981:CN=Keys,%ws + def operation_84(self, op): + if self.update_exists(op): + return + self.raise_if_not_fix(op) + + keys_dn = "CN=Keys,%s" % str(self.domain_dn) + + ldif = """ +dn: %s +changetype: modify +add: otherWellKnownObjects +otherWellKnownObjects: B:32:683A24E2E8164BD3AF86AC3C2CF3F981:%s +""" % (str(self.domain_dn), keys_dn) + + self.samdb.modify_ldif(ldif) + + if self.add_update_container: + self.update_add(op) + + + ## Operation 85: {e6d5fd00-385d-4e65-b02d-9da3493ed850} + ## + ## Modify the domain NC to permit "domain\Key Admins" and + ## "rootdomain\Enterprise Key Admins" + ## to modify the msds-KeyCredentialLink attribute. 
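+    ## (editor's note: 5b47d60f-6090-40b2-9f37-2a4de88f3063 here is the
+    ## schema GUID of the msDS-KeyCredentialLink attribute)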
+    ##
+    ##  (OA;CI;RPWP;5b47d60f-6090-40b2-9f37-2a4de88f3063;;Key Admins)
+    ##  (OA;CI;RPWP;5b47d60f-6090-40b2-9f37-2a4de88f3063;;Enterprise Key Admins)
+    ## in root domain, but in non-root domains resulted in a bogus domain-relative
+    ## ACE with a non-resolvable -527 SID
+    ##
+    def operation_85(self, op):
+        if self.update_exists(op):
+            return
+        self.raise_if_not_fix(op)
+
+        aces = ["(OA;CI;RPWP;5b47d60f-6090-40b2-9f37-2a4de88f3063;;KA)"]
+        # we use an explicit sid in order to replay the windows mistake
+        aces += ["(OA;CI;RPWP;5b47d60f-6090-40b2-9f37-2a4de88f3063;;%s-527)" %
+                 str(self.domain_sid)]
+
+        self.sd_utils.update_aces_in_dacl(self.domain_dn, add_aces=aces)
+
+        if self.add_update_container:
+            self.update_add(op)
+
+
+    ## Operation 86: {3a6b3fbf-3168-4312-a10d-dd5b3393952d}
+    ##
+    ## Grant the DS-Validated-Write-Computer CAR to creator owner and self
+    ##
+    ##  (OA;CIIO;SW;9b026da6-0d3c-465c-8bee-5199d7165cba;bf967a86-0de6-11d0-a285-00aa003049e2;PS)
+    ##  (OA;CIIO;SW;9b026da6-0d3c-465c-8bee-5199d7165cba;bf967a86-0de6-11d0-a285-00aa003049e2;CO)
+    ##
+    def operation_86(self, op):
+        if self.update_exists(op):
+            return
+        self.raise_if_not_fix(op)
+
+        aces = ["(OA;CIIO;SW;9b026da6-0d3c-465c-8bee-5199d7165cba;bf967a86-0de6-11d0-a285-00aa003049e2;PS)"]
+        aces += ["(OA;CIIO;SW;9b026da6-0d3c-465c-8bee-5199d7165cba;bf967a86-0de6-11d0-a285-00aa003049e2;CO)"]
+
+        self.sd_utils.update_aces_in_dacl(self.domain_dn, add_aces=aces)
+
+        if self.add_update_container:
+            self.update_add(op)
+
+    ## Operation 87: {7f950403-0ab3-47f9-9730-5d7b0269f9bd}
+    ##
+    ## Delete the ACE granting Full Control to the incorrect
+    ## domain-relative Enterprise Key Admins group, and add
+    ## an ACE granting Full Control to Enterprise Key Admins group.
+    ##
+    ## Delete (A;CI;RPWPCRLCLOCCDCRCWDWOSDDTSW;;;Enterprise Key Admins)
+    ## Add (A;CI;RPWPCRLCLOCCDCRCWDWOSDDTSW;;;Enterprise Key Admins)
+    ##
+    def operation_87(self, op):
+        if self.update_exists(op):
+            return
+        self.raise_if_not_fix(op)
+
+        # we use an explicit sid in order to replay the windows mistake
+        # note this is also strange for a 2nd reason because it doesn't
+        # delete: ["(OA;CI;RPWP;5b47d60f-6090-40b2-9f37-2a4de88f3063;;%s-527)"
+        # which was added in operation_85, so the del is basically a noop
+        # and the result is one additional ace
+        del_aces = ["(A;CI;RPWPCRLCLOCCDCRCWDWOSDDTSW;;;%s-527)" %
+                    str(self.domain_sid)]
+        add_aces = ["(A;CI;RPWPCRLCLOCCDCRCWDWOSDDTSW;;;EK)"]
+
+        self.sd_utils.update_aces_in_dacl(self.domain_dn,
+                                          del_aces=del_aces,
+                                          add_aces=add_aces)
+
+        if self.add_update_container:
+            self.update_add(op)
+
+    ## Operation 88: {434bb40d-dbc9-4fe7-81d4-d57229f7b080}
+    ##
+    ## Add "msDS-ExpirePasswordsOnSmartCardOnlyAccounts" on the domain NC object
+    ## and set default value to FALSE
+    ##
+    def operation_88(self, op):
+        if self.update_exists(op):
+            return
+        self.raise_if_not_fix(op)
+
+        ldif = """
+dn: %s
+changetype: modify
+add: msDS-ExpirePasswordsOnSmartCardOnlyAccounts
+msDS-ExpirePasswordsOnSmartCardOnlyAccounts: FALSE
+""" % str(self.domain_dn)
+
+        self.samdb.modify_ldif(ldif)
+
+        if self.add_update_container:
+            self.update_add(op)
+
+    ## After the operation that is performed by domainprep in Windows
+    ## Server 2016 (operation 89) completes, the revision attribute for the
+    ## CN=ActiveDirectoryUpdate,CN=DomainUpdates,CN=System,DC=ForestRootDomain object
+    ## is set to 16.
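+    ## (Editor's note, not upstream text: the stored revision can be
+    ## inspected on a live DC with a base-scope search, e.g.
+    ##
+    ##     ldbsearch -H ldap://<dc> \
+    ##         -b 'CN=ActiveDirectoryUpdate,CN=DomainUpdates,CN=System,<domain-dn>' \
+    ##         -s base revision
+    ## )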
+    ##
+
+    ## Operation 89: {a0c238ba-9e30-4ee6-80a6-43f731e9a5cd}
+    ##
+    ## Delete the ACE granting Full Control to Enterprise Key Admins and
+    ## add an ACE granting Enterprise Key Admins Full Control over just
+    ## the msDS-KeyCredentialLink attribute.
+    ##
+    ## Delete (A;CI;RPWPCRLCLOCCDCRCWDWOSDDTSW;;;Enterprise Key Admins)
+    ## Add (OA;CI;RPWP;5b47d60f-6090-40b2-9f37-2a4de88f3063;;Enterprise Key Admins)
+    ##
+    def operation_89(self, op):
+        if self.update_exists(op):
+            return
+        self.raise_if_not_fix(op)
+
+        # Note this only fixes the mistake from operation_87
+        # but leaves the mistake of operation_85 if we're
+        # not in the root domain...
+        del_aces = ["(A;CI;RPWPCRLCLOCCDCRCWDWOSDDTSW;;;EK)"]
+        add_aces = ["(OA;CI;RPWP;5b47d60f-6090-40b2-9f37-2a4de88f3063;;EK)"]
+
+        self.sd_utils.update_aces_in_dacl(self.domain_dn,
+                                          del_aces=del_aces,
+                                          add_aces=add_aces)
+
+        if self.add_update_container:
+            self.update_add(op)
diff --git a/python/samba/drs_utils.py b/python/samba/drs_utils.py
new file mode 100644
index 0000000..06e6928
--- /dev/null
+++ b/python/samba/drs_utils.py
@@ -0,0 +1,456 @@
+# DRS utility code
+#
+# Copyright Andrew Tridgell 2010
+# Copyright Andrew Bartlett 2017
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program.  If not, see <http://www.gnu.org/licenses/>.
+#
+
+from samba.dcerpc import drsuapi, misc, drsblobs
+from samba.net import Net
+from samba.ndr import ndr_unpack
+from samba import dsdb
+from samba import werror
+from samba import WERRORError
+import samba
+import ldb
+from samba.dcerpc.drsuapi import (DRSUAPI_ATTID_name,
+                                  DRSUAPI_SUPPORTED_EXTENSION_GETCHGREQ_V8,
+                                  DRSUAPI_SUPPORTED_EXTENSION_GETCHGREQ_V10)
+import re
+
+
+class drsException(Exception):
+    """Base exception for drs errors"""
+
+    def __init__(self, value):
+        self.value = value
+
+    def __str__(self):
+        return "drsException: " + self.value
+
+
+def drsuapi_connect(server, lp, creds, ip=None):
+    """Make a DRSUAPI connection to the server.
+
+    :param server: the name of the server to connect to
+    :param lp: a samba line parameter object
+    :param creds: credential used for the connection
+    :param ip: an optional IP address to connect to, forced in place of
+        resolving the server name
+    :return: A tuple with the drsuapi bind object, the drsuapi handle
+             and the supported extensions.
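+             A typical call (illustrative only) is
+             ``bind, handle, exts = drsuapi_connect(server, lp, creds)``.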
+ :raise drsException: if the connection fails + """ + + binding_options = "seal" + if lp.log_level() >= 9: + binding_options += ",print" + + # Allow forcing the IP + if ip is not None: + binding_options += f",target_hostname={server}" + binding_string = f"ncacn_ip_tcp:{ip}[{binding_options}]" + else: + binding_string = "ncacn_ip_tcp:%s[%s]" % (server, binding_options) + + try: + drsuapiBind = drsuapi.drsuapi(binding_string, lp, creds) + (drsuapiHandle, bindSupportedExtensions) = drs_DsBind(drsuapiBind) + except Exception as e: + raise drsException("DRS connection to %s failed: %s" % (server, e)) + + return (drsuapiBind, drsuapiHandle, bindSupportedExtensions) + + +def sendDsReplicaSync(drsuapiBind, drsuapi_handle, source_dsa_guid, + naming_context, req_option): + """Send DS replica sync request. + + :param drsuapiBind: a drsuapi Bind object + :param drsuapi_handle: a drsuapi handle on the drsuapi connection + :param source_dsa_guid: the guid of the source dsa for the replication + :param naming_context: the DN of the naming context to replicate + :param req_options: replication options for the DsReplicaSync call + :raise drsException: if any error occur while sending and receiving the + reply for the dsReplicaSync + """ + + nc = drsuapi.DsReplicaObjectIdentifier() + nc.dn = naming_context + + req1 = drsuapi.DsReplicaSyncRequest1() + req1.naming_context = nc + req1.options = req_option + req1.source_dsa_guid = misc.GUID(source_dsa_guid) + + try: + drsuapiBind.DsReplicaSync(drsuapi_handle, 1, req1) + except Exception as estr: + raise drsException("DsReplicaSync failed %s" % estr) + + +def drs_DsBind(drs): + """make a DsBind call, returning the binding handle""" + bind_info = drsuapi.DsBindInfoCtr() + bind_info.length = 28 + bind_info.info = drsuapi.DsBindInfo28() + bind_info.info.supported_extensions |= drsuapi.DRSUAPI_SUPPORTED_EXTENSION_BASE + bind_info.info.supported_extensions |= drsuapi.DRSUAPI_SUPPORTED_EXTENSION_ASYNC_REPLICATION + bind_info.info.supported_extensions |= drsuapi.DRSUAPI_SUPPORTED_EXTENSION_REMOVEAPI + bind_info.info.supported_extensions |= drsuapi.DRSUAPI_SUPPORTED_EXTENSION_MOVEREQ_V2 + bind_info.info.supported_extensions |= drsuapi.DRSUAPI_SUPPORTED_EXTENSION_GETCHG_COMPRESS + bind_info.info.supported_extensions |= drsuapi.DRSUAPI_SUPPORTED_EXTENSION_DCINFO_V1 + bind_info.info.supported_extensions |= drsuapi.DRSUAPI_SUPPORTED_EXTENSION_RESTORE_USN_OPTIMIZATION + bind_info.info.supported_extensions |= drsuapi.DRSUAPI_SUPPORTED_EXTENSION_KCC_EXECUTE + bind_info.info.supported_extensions |= drsuapi.DRSUAPI_SUPPORTED_EXTENSION_ADDENTRY_V2 + bind_info.info.supported_extensions |= drsuapi.DRSUAPI_SUPPORTED_EXTENSION_LINKED_VALUE_REPLICATION + bind_info.info.supported_extensions |= drsuapi.DRSUAPI_SUPPORTED_EXTENSION_DCINFO_V2 + bind_info.info.supported_extensions |= drsuapi.DRSUAPI_SUPPORTED_EXTENSION_INSTANCE_TYPE_NOT_REQ_ON_MOD + bind_info.info.supported_extensions |= drsuapi.DRSUAPI_SUPPORTED_EXTENSION_CRYPTO_BIND + bind_info.info.supported_extensions |= drsuapi.DRSUAPI_SUPPORTED_EXTENSION_GET_REPL_INFO + bind_info.info.supported_extensions |= drsuapi.DRSUAPI_SUPPORTED_EXTENSION_STRONG_ENCRYPTION + bind_info.info.supported_extensions |= drsuapi.DRSUAPI_SUPPORTED_EXTENSION_DCINFO_V01 + bind_info.info.supported_extensions |= drsuapi.DRSUAPI_SUPPORTED_EXTENSION_TRANSITIVE_MEMBERSHIP + bind_info.info.supported_extensions |= drsuapi.DRSUAPI_SUPPORTED_EXTENSION_ADD_SID_HISTORY + bind_info.info.supported_extensions |= drsuapi.DRSUAPI_SUPPORTED_EXTENSION_POST_BETA3 + 
bind_info.info.supported_extensions |= drsuapi.DRSUAPI_SUPPORTED_EXTENSION_GET_MEMBERSHIPS2 + bind_info.info.supported_extensions |= drsuapi.DRSUAPI_SUPPORTED_EXTENSION_GETCHGREQ_V6 + bind_info.info.supported_extensions |= drsuapi.DRSUAPI_SUPPORTED_EXTENSION_NONDOMAIN_NCS + bind_info.info.supported_extensions |= drsuapi.DRSUAPI_SUPPORTED_EXTENSION_GETCHGREQ_V8 + bind_info.info.supported_extensions |= drsuapi.DRSUAPI_SUPPORTED_EXTENSION_GETCHGREPLY_V5 + bind_info.info.supported_extensions |= drsuapi.DRSUAPI_SUPPORTED_EXTENSION_GETCHGREPLY_V6 + bind_info.info.supported_extensions |= drsuapi.DRSUAPI_SUPPORTED_EXTENSION_ADDENTRYREPLY_V3 + bind_info.info.supported_extensions |= drsuapi.DRSUAPI_SUPPORTED_EXTENSION_GETCHGREPLY_V7 + bind_info.info.supported_extensions |= drsuapi.DRSUAPI_SUPPORTED_EXTENSION_VERIFY_OBJECT + (info, handle) = drs.DsBind(misc.GUID(drsuapi.DRSUAPI_DS_BIND_GUID), bind_info) + + return (handle, info.info.supported_extensions) + + +def drs_get_rodc_partial_attribute_set(samdb): + """get a list of attributes for RODC replication""" + partial_attribute_set = drsuapi.DsPartialAttributeSet() + partial_attribute_set.version = 1 + + attids = [] + + # the exact list of attids we send is quite critical. Note that + # we do ask for the secret attributes, but set SPECIAL_SECRET_PROCESSING + # to zero them out + schema_dn = samdb.get_schema_basedn() + res = samdb.search(base=schema_dn, scope=ldb.SCOPE_SUBTREE, + expression="objectClass=attributeSchema", + attrs=["lDAPDisplayName", "systemFlags", + "searchFlags"]) + + for r in res: + ldap_display_name = str(r["lDAPDisplayName"][0]) + if "systemFlags" in r: + system_flags = r["systemFlags"][0] + if (int(system_flags) & (samba.dsdb.DS_FLAG_ATTR_NOT_REPLICATED | + samba.dsdb.DS_FLAG_ATTR_IS_CONSTRUCTED)): + continue + if "searchFlags" in r: + search_flags = r["searchFlags"][0] + if (int(search_flags) & samba.dsdb.SEARCH_FLAG_RODC_ATTRIBUTE): + continue + attid = samdb.get_attid_from_lDAPDisplayName(ldap_display_name) + attids.append(int(attid)) + + # the attids do need to be sorted, or windows doesn't return + # all the attributes we need + attids.sort() + partial_attribute_set.attids = attids + partial_attribute_set.num_attids = len(attids) + return partial_attribute_set + + +def drs_copy_highwater_mark(hwm, new_hwm): + """ + Copies the highwater mark by value, rather than by object reference. (This + avoids lingering talloc references to old GetNCChanges reply messages). 
+ """ + hwm.tmp_highest_usn = new_hwm.tmp_highest_usn + hwm.reserved_usn = new_hwm.reserved_usn + hwm.highest_usn = new_hwm.highest_usn + + +class drs_Replicate(object): + """DRS replication calls""" + + def __init__(self, binding_string, lp, creds, samdb, invocation_id): + self.drs = drsuapi.drsuapi(binding_string, lp, creds) + (self.drs_handle, self.supports_ext) = drs_DsBind(self.drs) + self.net = Net(creds=creds, lp=lp) + self.samdb = samdb + if not isinstance(invocation_id, misc.GUID): + raise RuntimeError("Must supply GUID for invocation_id") + if invocation_id == misc.GUID("00000000-0000-0000-0000-000000000000"): + raise RuntimeError("Must not set GUID 00000000-0000-0000-0000-000000000000 as invocation_id") + self.replication_state = self.net.replicate_init(self.samdb, lp, self.drs, invocation_id) + self.more_flags = 0 + + def _should_retry_with_get_tgt(self, error_code, req): + + # If the error indicates we fail to resolve a target object for a + # linked attribute, then we should retry the request with GET_TGT + # (if we support it and haven't already tried that) + supports_ext = self.supports_ext + + return (error_code == werror.WERR_DS_DRA_RECYCLED_TARGET and + supports_ext & DRSUAPI_SUPPORTED_EXTENSION_GETCHGREQ_V10 and + (req.more_flags & drsuapi.DRSUAPI_DRS_GET_TGT) == 0) + + @staticmethod + def _should_calculate_missing_anc_locally(error_code, req): + # If the error indicates we fail to resolve the parent object + # for a new object, then we assume we are replicating from a + # buggy server (Samba 4.5 and earlier) that doesn't really + # understand how to implement GET_ANC + + return ((error_code == werror.WERR_DS_DRA_MISSING_PARENT) and + (req.replica_flags & drsuapi.DRSUAPI_DRS_GET_ANC) != 0) + + + def _calculate_missing_anc_locally(self, ctr): + self.guids_seen = set() + + # walk objects in ctr, add to guid_seen as we see them + # note if an object doesn't have a parent + + object_to_check = ctr.first_object + + while True: + if object_to_check is None: + break + + self.guids_seen.add(str(object_to_check.object.identifier.guid)) + + if object_to_check.parent_object_guid is not None \ + and object_to_check.parent_object_guid \ + != misc.GUID("00000000-0000-0000-0000-000000000000") \ + and str(object_to_check.parent_object_guid) not in self.guids_seen: + obj_dn = ldb.Dn(self.samdb, object_to_check.object.identifier.dn) + parent_dn = obj_dn.parent() + print(f"Object {parent_dn} with " + f"GUID {object_to_check.parent_object_guid} " + "was not sent by the server in this chunk") + + object_to_check = object_to_check.next_object + + + def process_chunk(self, level, ctr, schema, req_level, req, first_chunk): + """Processes a single chunk of received replication data""" + # pass the replication into the py_net.c python bindings for processing + self.net.replicate_chunk(self.replication_state, level, ctr, + schema=schema, req_level=req_level, req=req) + + def replicate(self, dn, source_dsa_invocation_id, destination_dsa_guid, + schema=False, exop=drsuapi.DRSUAPI_EXOP_NONE, rodc=False, + replica_flags=None, full_sync=True, sync_forced=False, more_flags=0): + """replicate a single DN""" + + # setup for a GetNCChanges call + if self.supports_ext & DRSUAPI_SUPPORTED_EXTENSION_GETCHGREQ_V10: + req = drsuapi.DsGetNCChangesRequest10() + req.more_flags = (more_flags | self.more_flags) + req_level = 10 + else: + req_level = 8 + req = drsuapi.DsGetNCChangesRequest8() + + req.destination_dsa_guid = destination_dsa_guid + req.source_dsa_invocation_id = source_dsa_invocation_id + 
req.naming_context = drsuapi.DsReplicaObjectIdentifier() + req.naming_context.dn = dn + + # Default to a full replication if we don't find an upToDatenessVector + udv = None + hwm = drsuapi.DsReplicaHighWaterMark() + hwm.tmp_highest_usn = 0 + hwm.reserved_usn = 0 + hwm.highest_usn = 0 + + if not full_sync: + res = self.samdb.search(base=dn, scope=ldb.SCOPE_BASE, + attrs=["repsFrom"]) + if "repsFrom" in res[0]: + for reps_from_packed in res[0]["repsFrom"]: + reps_from_obj = ndr_unpack(drsblobs.repsFromToBlob, reps_from_packed) + if reps_from_obj.ctr.source_dsa_invocation_id == source_dsa_invocation_id: + hwm = reps_from_obj.ctr.highwatermark + + udv = drsuapi.DsReplicaCursorCtrEx() + udv.version = 1 + udv.reserved1 = 0 + udv.reserved2 = 0 + + cursors_v1 = [] + cursors_v2 = dsdb._dsdb_load_udv_v2(self.samdb, + self.samdb.get_default_basedn()) + for cursor_v2 in cursors_v2: + cursor_v1 = drsuapi.DsReplicaCursor() + cursor_v1.source_dsa_invocation_id = cursor_v2.source_dsa_invocation_id + cursor_v1.highest_usn = cursor_v2.highest_usn + cursors_v1.append(cursor_v1) + + udv.cursors = cursors_v1 + udv.count = len(cursors_v1) + + req.highwatermark = hwm + req.uptodateness_vector = udv + + if replica_flags is not None: + req.replica_flags = replica_flags + elif exop == drsuapi.DRSUAPI_EXOP_REPL_SECRET: + req.replica_flags = 0 + else: + req.replica_flags = (drsuapi.DRSUAPI_DRS_INIT_SYNC | + drsuapi.DRSUAPI_DRS_PER_SYNC | + drsuapi.DRSUAPI_DRS_GET_ANC | + drsuapi.DRSUAPI_DRS_NEVER_SYNCED | + drsuapi.DRSUAPI_DRS_GET_ALL_GROUP_MEMBERSHIP) + if rodc: + req.replica_flags |= ( + drsuapi.DRSUAPI_DRS_SPECIAL_SECRET_PROCESSING) + else: + req.replica_flags |= drsuapi.DRSUAPI_DRS_WRIT_REP + + if sync_forced: + req.replica_flags |= drsuapi.DRSUAPI_DRS_SYNC_FORCED + + req.max_object_count = 402 + req.max_ndr_size = 402116 + req.extended_op = exop + req.fsmo_info = 0 + req.partial_attribute_set = None + req.partial_attribute_set_ex = None + req.mapping_ctr.num_mappings = 0 + req.mapping_ctr.mappings = None + + if not schema and rodc: + req.partial_attribute_set = drs_get_rodc_partial_attribute_set(self.samdb) + + if not self.supports_ext & DRSUAPI_SUPPORTED_EXTENSION_GETCHGREQ_V8: + req_level = 5 + req5 = drsuapi.DsGetNCChangesRequest5() + for a in dir(req5): + if a[0] != '_': + setattr(req5, a, getattr(req, a)) + req = req5 + + num_objects = 0 + num_links = 0 + first_chunk = True + + while True: + (level, ctr) = self.drs.DsGetNCChanges(self.drs_handle, req_level, req) + if ctr.first_object is None and ctr.object_count != 0: + raise RuntimeError("DsGetNCChanges: NULL first_object with object_count=%u" % (ctr.object_count)) + + try: + self.process_chunk(level, ctr, schema, req_level, req, first_chunk) + except WERRORError as e: + # Check if retrying with the GET_TGT flag set might resolve this error + if self._should_retry_with_get_tgt(e.args[0], req): + + print("Missing target object - retrying with DRS_GET_TGT") + req.more_flags |= drsuapi.DRSUAPI_DRS_GET_TGT + + # try sending the request again (this has the side-effect + # of causing the DC to restart the replication from scratch) + first_chunk = True + continue + + if self._should_calculate_missing_anc_locally(e.args[0], + req): + print("Missing parent object - calculating missing objects locally") + + self._calculate_missing_anc_locally(ctr) + raise e + + first_chunk = False + num_objects += ctr.object_count + + # Cope with servers that do not return level 6, so do not return any links + try: + num_links += ctr.linked_attributes_count + except AttributeError: 
+ pass + + if ctr.more_data == 0: + break + + # update the request's HWM so we get the next chunk + drs_copy_highwater_mark(req.highwatermark, ctr.new_highwatermark) + + return (num_objects, num_links) + + +# Handles the special case of creating a new clone of a DB, while also renaming +# the entire DB's objects on the way through +class drs_ReplicateRenamer(drs_Replicate): + """Uses DRS replication to rename the entire DB""" + + def __init__(self, binding_string, lp, creds, samdb, invocation_id, + old_base_dn, new_base_dn): + super().__init__(binding_string, lp, creds, samdb, invocation_id) + self.old_base_dn = old_base_dn + self.new_base_dn = new_base_dn + + # because we're renaming the DNs, we know we're going to have trouble + # resolving link targets. Normally we'd get to the end of replication + # only to find we need to retry the whole replication with the GET_TGT + # flag set. Always setting the GET_TGT flag avoids this extra work. + self.more_flags = drsuapi.DRSUAPI_DRS_GET_TGT + + def rename_dn(self, dn_str): + """Uses string substitution to replace the base DN""" + return re.sub('%s$' % self.old_base_dn, self.new_base_dn, dn_str) + + def update_name_attr(self, base_obj): + """Updates the 'name' attribute for the base DN object""" + for attr in base_obj.attribute_ctr.attributes: + if attr.attid == DRSUAPI_ATTID_name: + base_dn = ldb.Dn(self.samdb, base_obj.identifier.dn) + new_name = base_dn.get_rdn_value() + attr.value_ctr.values[0].blob = new_name.encode('utf-16-le') + + def rename_top_level_object(self, first_obj): + """Renames the first/top-level object in a partition""" + old_dn = first_obj.identifier.dn + first_obj.identifier.dn = self.rename_dn(first_obj.identifier.dn) + print("Renaming partition %s --> %s" % (old_dn, + first_obj.identifier.dn)) + + # we also need to fix up the 'name' attribute for the base DN, + # otherwise the RDNs won't match + if first_obj.identifier.dn == self.new_base_dn: + self.update_name_attr(first_obj) + + def process_chunk(self, level, ctr, schema, req_level, req, first_chunk): + """Processes a single chunk of received replication data""" + + # we need to rename the NC in every chunk - this gets used in searches + # when applying the chunk + if ctr.naming_context: + ctr.naming_context.dn = self.rename_dn(ctr.naming_context.dn) + + # rename the first object in each partition. This will cause every + # subsequent object in the partition to be renamed as a side-effect + if first_chunk and ctr.object_count != 0: + self.rename_top_level_object(ctr.first_object.object) + + # then do the normal repl processing to apply this chunk to our DB + super().process_chunk(level, ctr, schema, req_level, req, first_chunk) diff --git a/python/samba/emulate/__init__.py b/python/samba/emulate/__init__.py new file mode 100644 index 0000000..110e19d --- /dev/null +++ b/python/samba/emulate/__init__.py @@ -0,0 +1,16 @@ +# Package level initialisation +# +# Copyright (C) Catalyst IT Ltd. 2017 +# +# This program is free software; you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation; either version 3 of the License, or +# (at your option) any later version. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. 
+#
+# You should have received a copy of the GNU General Public License
+# along with this program.  If not, see <http://www.gnu.org/licenses/>.
diff --git a/python/samba/emulate/traffic.py b/python/samba/emulate/traffic.py
new file mode 100644
index 0000000..4811fe8
--- /dev/null
+++ b/python/samba/emulate/traffic.py
@@ -0,0 +1,2415 @@
+# -*- encoding: utf-8 -*-
+# Samba traffic replay and learning
+#
+# Copyright (C) Catalyst IT Ltd. 2017
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program.  If not, see <http://www.gnu.org/licenses/>.
+#
+
+import time
+import os
+import random
+import json
+import math
+import sys
+import signal
+from errno import ECHILD, ESRCH
+
+from collections import OrderedDict, Counter, defaultdict, namedtuple
+from dns.resolver import query as dns_query
+
+from samba.emulate import traffic_packets
+from samba.samdb import SamDB
+import ldb
+from ldb import LdbError
+from samba.dcerpc import ClientConnection
+from samba.dcerpc import security, drsuapi, lsa
+from samba.dcerpc import netlogon
+from samba.dcerpc.netlogon import netr_Authenticator
+from samba.dcerpc import srvsvc
+from samba.dcerpc import samr
+from samba.drs_utils import drs_DsBind
+import traceback
+from samba.credentials import Credentials, DONT_USE_KERBEROS, MUST_USE_KERBEROS
+from samba.auth import system_session
+from samba.dsdb import (
+    UF_NORMAL_ACCOUNT,
+    UF_SERVER_TRUST_ACCOUNT,
+    UF_TRUSTED_FOR_DELEGATION,
+    UF_WORKSTATION_TRUST_ACCOUNT
+)
+from samba.dcerpc.misc import SEC_CHAN_BDC
+from samba import gensec
+from samba import sd_utils
+from samba.common import get_string
+from samba.logger import get_samba_logger
+import bisect
+
+CURRENT_MODEL_VERSION = 2   # save as this
+REQUIRED_MODEL_VERSION = 2  # load accepts this or greater
+SLEEP_OVERHEAD = 3e-4
+
+# we don't use None, because it complicates [de]serialisation
+NON_PACKET = '-'
+
+CLIENT_CLUES = {
+    ('dns', '0'): 1.0,      # query
+    ('smb', '0x72'): 1.0,   # Negotiate protocol
+    ('ldap', '0'): 1.0,     # bind
+    ('ldap', '3'): 1.0,     # searchRequest
+    ('ldap', '2'): 1.0,     # unbindRequest
+    ('cldap', '3'): 1.0,
+    ('dcerpc', '11'): 1.0,  # bind
+    ('dcerpc', '14'): 1.0,  # Alter_context
+    ('nbns', '0'): 1.0,     # query
+}
+
+SERVER_CLUES = {
+    ('dns', '1'): 1.0,      # response
+    ('ldap', '1'): 1.0,     # bind response
+    ('ldap', '4'): 1.0,     # search result
+    ('ldap', '5'): 1.0,     # search done
+    ('cldap', '5'): 1.0,
+    ('dcerpc', '12'): 1.0,  # bind_ack
+    ('dcerpc', '13'): 1.0,  # bind_nak
+    ('dcerpc', '15'): 1.0,  # Alter_context response
+}
+
+SKIPPED_PROTOCOLS = {"smb", "smb2", "browser", "smb_netlogon"}
+
+WAIT_SCALE = 10.0
+WAIT_THRESHOLD = (1.0 / WAIT_SCALE)
+NO_WAIT_LOG_TIME_RANGE = (-10, -3)
+
+# DEBUG_LEVEL can be changed by scripts with -d
+DEBUG_LEVEL = 0
+
+LOGGER = get_samba_logger(name=__name__)
+
+
+def debug(level, msg, *args):
+    """Print a formatted debug message to standard error.
+
+    :param level: The debug level, message will be printed if it is <= the
+                  currently set debug level. The debug level can be set with
+                  the -d option.
+    :param msg: The message to be logged, can contain C-Style format
+                specifiers
+    :param args: The parameters required by the format specifiers
+    """
+    if level <= DEBUG_LEVEL:
+        if not args:
+            print(msg, file=sys.stderr)
+        else:
+            print(msg % tuple(args), file=sys.stderr)
+
+
+def debug_lineno(*args):
+    """ Print an unformatted log message to stderr, containing the line number
+    """
+    tb = traceback.extract_stack(limit=2)
+    print((" %s:" "\033[01;33m"
+           "%s " "\033[00m" % (tb[0][2], tb[0][1])), end=' ',
+          file=sys.stderr)
+    for a in args:
+        print(a, file=sys.stderr)
+    print(file=sys.stderr)
+    sys.stderr.flush()
+
+
+def random_colour_print(seeds):
+    """Return a function that prints a coloured line to stderr. The colour
+    of the line depends on a sort of hash of the integer arguments."""
+    if seeds:
+        s = 214
+        for x in seeds:
+            s += 17
+            s *= x
+            s %= 214
+        prefix = "\033[38;5;%dm" % (18 + s)
+
+        def p(*args):
+            if DEBUG_LEVEL > 0:
+                for a in args:
+                    print("%s%s\033[00m" % (prefix, a), file=sys.stderr)
+    else:
+        def p(*args):
+            if DEBUG_LEVEL > 0:
+                for a in args:
+                    print(a, file=sys.stderr)
+
+    return p
+
+
+class FakePacketError(Exception):
+    pass
+
+
+class Packet(object):
+    """Details of a network packet"""
+    __slots__ = ('timestamp',
+                 'ip_protocol',
+                 'stream_number',
+                 'src',
+                 'dest',
+                 'protocol',
+                 'opcode',
+                 'desc',
+                 'extra',
+                 'endpoints')
+
+    def __init__(self, timestamp, ip_protocol, stream_number, src, dest,
+                 protocol, opcode, desc, extra):
+        self.timestamp = timestamp
+        self.ip_protocol = ip_protocol
+        self.stream_number = stream_number
+        self.src = src
+        self.dest = dest
+        self.protocol = protocol
+        self.opcode = opcode
+        self.desc = desc
+        self.extra = extra
+        if self.src < self.dest:
+            self.endpoints = (self.src, self.dest)
+        else:
+            self.endpoints = (self.dest, self.src)
+
+    @classmethod
+    def from_line(cls, line):
+        fields = line.rstrip('\n').split('\t')
+        (timestamp,
+         ip_protocol,
+         stream_number,
+         src,
+         dest,
+         protocol,
+         opcode,
+         desc) = fields[:8]
+        extra = fields[8:]
+
+        timestamp = float(timestamp)
+        src = int(src)
+        dest = int(dest)
+
+        return cls(timestamp, ip_protocol, stream_number, src, dest,
+                   protocol, opcode, desc, extra)
+
+    def as_summary(self, time_offset=0.0):
+        """Format the packet as a traffic_summary line.
+        """
+        extra = '\t'.join(self.extra)
+        t = self.timestamp + time_offset
+        return (t, '%f\t%s\t%s\t%d\t%d\t%s\t%s\t%s\t%s' %
+                (t,
+                 self.ip_protocol,
+                 self.stream_number or '',
+                 self.src,
+                 self.dest,
+                 self.protocol,
+                 self.opcode,
+                 self.desc,
+                 extra))
+
+    def __str__(self):
+        return ("%.3f: %d -> %d; ip %s; strm %s; prot %s; op %s; desc %s %s" %
+                (self.timestamp, self.src, self.dest, self.ip_protocol or '-',
+                 self.stream_number, self.protocol, self.opcode, self.desc,
+                 ('«' + ' '.join(self.extra) + '»' if self.extra else '')))
+
+    def __repr__(self):
+        return "<Packet %s>" % self
+
+    def copy(self):
+        return self.__class__(self.timestamp,
+                              self.ip_protocol,
+                              self.stream_number,
+                              self.src,
+                              self.dest,
+                              self.protocol,
+                              self.opcode,
+                              self.desc,
+                              self.extra)
+
+    def as_packet_type(self):
+        t = '%s:%s' % (self.protocol, self.opcode)
+        return t
+
+    def client_score(self):
+        """A positive number means we think it is a client; a negative number
+        means we think it is a server. Zero means no idea. Range: -1 to 1.
+        """
+        key = (self.protocol, self.opcode)
+        if key in CLIENT_CLUES:
+            return CLIENT_CLUES[key]
+        if key in SERVER_CLUES:
+            return -SERVER_CLUES[key]
+        return 0.0
+
+    def play(self, conversation, context):
+        """Send the packet over the network, if required.
+
+        Some packets are ignored, i.e. for protocols not handled,
+        server response messages, or messages that are generated by the
+        protocol layer associated with other packets.
+        """
+        fn_name = 'packet_%s_%s' % (self.protocol, self.opcode)
+        try:
+            fn = getattr(traffic_packets, fn_name)
+
+        except AttributeError as e:
+            print("Conversation(%s) Missing handler %s" %
+                  (conversation.conversation_id, fn_name),
+                  file=sys.stderr)
+            return
+
+        # Don't display a message for kerberos packets, they're not directly
+        # generated; they're used to indicate kerberos should be used
+        if self.protocol != "kerberos":
+            debug(2, "Conversation(%s) Calling handler %s" %
+                     (conversation.conversation_id, fn_name))
+
+        start = time.time()
+        try:
+            if fn(self, conversation, context):
+                # Only collect timing data for functions that generate
+                # network traffic, or fail
+                end = time.time()
+                duration = end - start
+                print("%f\t%s\t%s\t%s\t%f\tTrue\t" %
+                      (end, conversation.conversation_id, self.protocol,
+                       self.opcode, duration))
+        except Exception as e:
+            end = time.time()
+            duration = end - start
+            print("%f\t%s\t%s\t%s\t%f\tFalse\t%s" %
+                  (end, conversation.conversation_id, self.protocol,
+                   self.opcode, duration, e))
+
+    def __cmp__(self, other):
+        return self.timestamp - other.timestamp
+
+    def is_really_a_packet(self, missing_packet_stats=None):
+        return is_a_real_packet(self.protocol, self.opcode)
+
+
+def is_a_real_packet(protocol, opcode):
+    """Return true if the packet matters for the replay.
+
+    Packets for which this is false can be removed without affecting
+    the replay.
+    """
+    if protocol in SKIPPED_PROTOCOLS:
+        # Ignore any packets for the protocols we're not interested in.
+        return False
+    if protocol == "ldap" and opcode == '':
+        # skip ldap continuation packets
+        return False
+
+    fn_name = 'packet_%s_%s' % (protocol, opcode)
+    fn = getattr(traffic_packets, fn_name, None)
+    if fn is None:
+        LOGGER.debug("missing packet %s" % fn_name)
+        return False
+    if fn is traffic_packets.null_packet:
+        return False
+    return True
+
+
+def is_a_traffic_generating_packet(protocol, opcode):
+    """Return true if a packet generates traffic in its own right. Some of
+    these will generate traffic in certain contexts (e.g. ldap unbind
+    after a bind) but not if the conversation consists only of these packets.
+    """
+    if protocol == 'wait':
+        return False
+
+    if (protocol, opcode) in (
+            ('kerberos', ''),
+            ('ldap', '2'),
+            ('dcerpc', '15'),
+            ('dcerpc', '16')):
+        return False
+
+    return is_a_real_packet(protocol, opcode)
+
+
+class ReplayContext(object):
+    """State/Context for a conversation between a simulated client and a
+    server. Some of the context is shared amongst all conversations
+    and should be generated before the fork, while other context is
+    specific to a particular conversation and should be generated
+    *after* the fork, in generate_process_local_config().
+ """ + def __init__(self, + server=None, + lp=None, + creds=None, + total_conversations=None, + badpassword_frequency=None, + prefer_kerberos=None, + tempdir=None, + statsdir=None, + ou=None, + base_dn=None, + domain=os.environ.get("DOMAIN"), + domain_sid=None, + instance_id=None): + self.server = server + self.netlogon_connection = None + self.creds = creds + self.lp = lp + if prefer_kerberos: + self.kerberos_state = MUST_USE_KERBEROS + else: + self.kerberos_state = DONT_USE_KERBEROS + self.ou = ou + self.base_dn = base_dn + self.domain = domain + self.statsdir = statsdir + self.global_tempdir = tempdir + self.domain_sid = domain_sid + self.realm = lp.get('realm') + self.instance_id = instance_id + + # Bad password attempt controls + self.badpassword_frequency = badpassword_frequency + self.last_lsarpc_bad = False + self.last_lsarpc_named_bad = False + self.last_simple_bind_bad = False + self.last_bind_bad = False + self.last_srvsvc_bad = False + self.last_drsuapi_bad = False + self.last_netlogon_bad = False + self.last_samlogon_bad = False + self.total_conversations = total_conversations + self.generate_ldap_search_tables() + + def generate_ldap_search_tables(self): + session = system_session() + + db = SamDB(url="ldap://%s" % self.server, + session_info=session, + credentials=self.creds, + lp=self.lp) + + res = db.search(db.domain_dn(), + scope=ldb.SCOPE_SUBTREE, + controls=["paged_results:1:1000"], + attrs=['dn']) + + # find a list of dns for each pattern + # e.g. CN,CN,CN,DC,DC + dn_map = {} + attribute_clue_map = { + 'invocationId': [] + } + + for r in res: + dn = str(r.dn) + pattern = ','.join(x.lstrip()[:2] for x in dn.split(',')).upper() + dns = dn_map.setdefault(pattern, []) + dns.append(dn) + if dn.startswith('CN=NTDS Settings,'): + attribute_clue_map['invocationId'].append(dn) + + # extend the map in case we are working with a different + # number of DC components. + # for k, v in self.dn_map.items(): + # print >>sys.stderr, k, len(v) + + for k in list(dn_map.keys()): + if k[-3:] != ',DC': + continue + p = k[:-3] + while p[-3:] == ',DC': + p = p[:-3] + for i in range(5): + p += ',DC' + if p != k and p in dn_map: + print('dn_map collision %s %s' % (k, p), + file=sys.stderr) + continue + dn_map[p] = dn_map[k] + + self.dn_map = dn_map + self.attribute_clue_map = attribute_clue_map + + # pre-populate DN-based search filters (it's simplest to generate them + # once, when the test starts). 
These are used by guess_search_filter() + # to avoid full-scans + self.search_filters = {} + + # lookup all the GPO DNs + res = db.search(db.domain_dn(), scope=ldb.SCOPE_SUBTREE, attrs=['dn'], + expression='(objectclass=groupPolicyContainer)') + gpos_by_dn = "".join("(distinguishedName={0})".format(msg['dn']) for msg in res) + + # a search for the 'gPCFileSysPath' attribute is probably a GPO search + # (as per the MS-GPOL spec) which searches for GPOs by DN + self.search_filters['gPCFileSysPath'] = "(|{0})".format(gpos_by_dn) + + # likewise, a search for gpLink is probably the Domain SOM search part + # of the MS-GPOL, in which case it's looking up a few OUs by DN + ou_str = "" + for ou in ["Domain Controllers,", "traffic_replay,", ""]: + ou_str += "(distinguishedName={0}{1})".format(ou, db.domain_dn()) + self.search_filters['gpLink'] = "(|{0})".format(ou_str) + + # The CEP Web Service can query the AD DC to get pKICertificateTemplate + # objects (as per MS-WCCE) + self.search_filters['pKIExtendedKeyUsage'] = \ + '(objectCategory=pKICertificateTemplate)' + + # assume that anything querying the usnChanged is some kind of + # synchronization tool, e.g. AD Change Detection Connector + res = db.search('', scope=ldb.SCOPE_BASE, attrs=['highestCommittedUSN']) + self.search_filters['usnChanged'] = \ + '(usnChanged>={0})'.format(res[0]['highestCommittedUSN']) + + # The traffic_learner script doesn't preserve the LDAP search filter, and + # having no filter can result in a full DB scan. This is costly for a large + # DB, and not necessarily representative of real world traffic. As there + # several standard LDAP queries that get used by AD tools, we can apply + # some logic and guess what the search filter might have been originally. + def guess_search_filter(self, attrs, dn_sig, dn): + + # there are some standard spec-based searches that query fairly unique + # attributes. Check if the search is likely one of these + for key in self.search_filters.keys(): + if key in attrs: + return self.search_filters[key] + + # if it's the top-level domain, assume we're looking up a single user, + # e.g. 
like powershell Get-ADUser or a similar tool
+        if dn_sig == 'DC,DC':
+            random_user_id = int(random.random() * self.total_conversations)
+            account_name = user_name(self.instance_id, random_user_id)
+            return '(&(sAMAccountName=%s)(objectClass=user))' % account_name
+
+        # otherwise just return everything in the sub-tree
+        return '(objectClass=*)'
+
+    def generate_process_local_config(self, account, conversation):
+        self.ldap_connections = []
+        self.dcerpc_connections = []
+        self.lsarpc_connections = []
+        self.lsarpc_connections_named = []
+        self.drsuapi_connections = []
+        self.srvsvc_connections = []
+        self.samr_contexts = []
+        self.netbios_name = account.netbios_name
+        self.machinepass = account.machinepass
+        self.username = account.username
+        self.userpass = account.userpass
+
+        self.tempdir = mk_masked_dir(self.global_tempdir,
+                                     'conversation-%d' %
+                                     conversation.conversation_id)
+
+        self.lp.set("private dir", self.tempdir)
+        self.lp.set("lock dir", self.tempdir)
+        self.lp.set("state directory", self.tempdir)
+        self.lp.set("tls verify peer", "no_check")
+
+        self.remoteAddress = "/root/ncalrpc_as_system"
+        self.samlogon_dn = ("cn=%s,%s" %
+                            (self.netbios_name, self.ou))
+        self.user_dn = ("cn=%s,%s" %
+                        (self.username, self.ou))
+
+        self.generate_machine_creds()
+        self.generate_user_creds()
+
+    def with_random_bad_credentials(self, f, good, bad, failed_last_time):
+        """Execute the supplied logon function, randomly choosing the
+        bad credentials.
+
+        Based on the frequency in badpassword_frequency, randomly perform the
+        function with the supplied bad credentials.
+        If run with bad credentials, the function is re-run with the good
+        credentials.
+        failed_last_time is used to prevent consecutive bad credential
+        attempts. So the overall bad credential frequency will be lower
+        than that requested, but not significantly.
+        """
+        if not failed_last_time:
+            if (self.badpassword_frequency and
+                random.random() < self.badpassword_frequency):
+                try:
+                    f(bad)
+                except Exception:
+                    # Ignore any exceptions as the operation may fail
+                    # as it's being performed with bad credentials
+                    pass
+                failed_last_time = True
+            else:
+                failed_last_time = False
+
+        result = f(good)
+        return (result, failed_last_time)
+
+    def generate_user_creds(self):
+        """Generate the conversation specific user Credentials.
+
+        Each Conversation has an associated user account used to simulate
+        any non-Administrative user traffic.
+
+        Generates user credentials with good and bad passwords and ldap
+        simple bind credentials with good and bad passwords.
+        """
+        self.user_creds = Credentials()
+        self.user_creds.guess(self.lp)
+        self.user_creds.set_workstation(self.netbios_name)
+        self.user_creds.set_password(self.userpass)
+        self.user_creds.set_username(self.username)
+        self.user_creds.set_domain(self.domain)
+        self.user_creds.set_kerberos_state(self.kerberos_state)
+
+        self.user_creds_bad = Credentials()
+        self.user_creds_bad.guess(self.lp)
+        self.user_creds_bad.set_workstation(self.netbios_name)
+        self.user_creds_bad.set_password(self.userpass[:-4])
+        self.user_creds_bad.set_username(self.username)
+        self.user_creds_bad.set_kerberos_state(self.kerberos_state)
+
+        # Credentials for ldap simple bind.
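+        # (Editor's note: these carry an explicit bind DN and are consumed
+        # by get_ldap_connection(simple=True) below, which simple-binds over
+        # ldaps:// rather than doing a SASL bind.)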
+ self.simple_bind_creds = Credentials() + self.simple_bind_creds.guess(self.lp) + self.simple_bind_creds.set_workstation(self.netbios_name) + self.simple_bind_creds.set_password(self.userpass) + self.simple_bind_creds.set_username(self.username) + self.simple_bind_creds.set_gensec_features( + self.simple_bind_creds.get_gensec_features() | gensec.FEATURE_SEAL) + self.simple_bind_creds.set_kerberos_state(self.kerberos_state) + self.simple_bind_creds.set_bind_dn(self.user_dn) + + self.simple_bind_creds_bad = Credentials() + self.simple_bind_creds_bad.guess(self.lp) + self.simple_bind_creds_bad.set_workstation(self.netbios_name) + self.simple_bind_creds_bad.set_password(self.userpass[:-4]) + self.simple_bind_creds_bad.set_username(self.username) + self.simple_bind_creds_bad.set_gensec_features( + self.simple_bind_creds_bad.get_gensec_features() | + gensec.FEATURE_SEAL) + self.simple_bind_creds_bad.set_kerberos_state(self.kerberos_state) + self.simple_bind_creds_bad.set_bind_dn(self.user_dn) + + def generate_machine_creds(self): + """Generate the conversation specific machine Credentials. + + Each Conversation has an associated machine account. + + Generates machine credentials with good and bad passwords. + """ + + self.machine_creds = Credentials() + self.machine_creds.guess(self.lp) + self.machine_creds.set_workstation(self.netbios_name) + self.machine_creds.set_secure_channel_type(SEC_CHAN_BDC) + self.machine_creds.set_password(self.machinepass) + self.machine_creds.set_username(self.netbios_name + "$") + self.machine_creds.set_domain(self.domain) + self.machine_creds.set_kerberos_state(self.kerberos_state) + + self.machine_creds_bad = Credentials() + self.machine_creds_bad.guess(self.lp) + self.machine_creds_bad.set_workstation(self.netbios_name) + self.machine_creds_bad.set_secure_channel_type(SEC_CHAN_BDC) + self.machine_creds_bad.set_password(self.machinepass[:-4]) + self.machine_creds_bad.set_username(self.netbios_name + "$") + self.machine_creds_bad.set_kerberos_state(self.kerberos_state) + + def get_matching_dn(self, pattern, attributes=None): + # If the pattern is an empty string, we assume ROOTDSE, + # Otherwise we try adding or removing DC suffixes, then + # shorter leading patterns until we hit one. + # e.g if there is no CN,CN,CN,CN,DC,DC + # we first try CN,CN,CN,CN,DC + # and CN,CN,CN,CN,DC,DC,DC + # then change to CN,CN,CN,DC,DC + # and as last resort we use the base_dn + attr_clue = self.attribute_clue_map.get(attributes) + if attr_clue: + return random.choice(attr_clue) + + pattern = pattern.upper() + while pattern: + if pattern in self.dn_map: + return random.choice(self.dn_map[pattern]) + # chop one off the front and try it all again. 
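+            # e.g. 'CN,CN,CN,DC,DC' -> 'CN,CN,DC,DC' -> 'CN,DC,DC' -> ...
+            # (editor's illustration of the fallback order)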
+ pattern = pattern[3:] + + return self.base_dn + + def get_dcerpc_connection(self, new=False): + guid = '12345678-1234-abcd-ef00-01234567cffb' # RPC_NETLOGON UUID + if self.dcerpc_connections and not new: + return self.dcerpc_connections[-1] + c = ClientConnection("ncacn_ip_tcp:%s" % self.server, + (guid, 1), self.lp) + self.dcerpc_connections.append(c) + return c + + def get_srvsvc_connection(self, new=False): + if self.srvsvc_connections and not new: + return self.srvsvc_connections[-1] + + def connect(creds): + return srvsvc.srvsvc("ncacn_np:%s" % (self.server), + self.lp, + creds) + + (c, self.last_srvsvc_bad) = \ + self.with_random_bad_credentials(connect, + self.user_creds, + self.user_creds_bad, + self.last_srvsvc_bad) + + self.srvsvc_connections.append(c) + return c + + def get_lsarpc_connection(self, new=False): + if self.lsarpc_connections and not new: + return self.lsarpc_connections[-1] + + def connect(creds): + binding_options = 'schannel,seal,sign' + return lsa.lsarpc("ncacn_ip_tcp:%s[%s]" % + (self.server, binding_options), + self.lp, + creds) + + (c, self.last_lsarpc_bad) = \ + self.with_random_bad_credentials(connect, + self.machine_creds, + self.machine_creds_bad, + self.last_lsarpc_bad) + + self.lsarpc_connections.append(c) + return c + + def get_lsarpc_named_pipe_connection(self, new=False): + if self.lsarpc_connections_named and not new: + return self.lsarpc_connections_named[-1] + + def connect(creds): + return lsa.lsarpc("ncacn_np:%s" % (self.server), + self.lp, + creds) + + (c, self.last_lsarpc_named_bad) = \ + self.with_random_bad_credentials(connect, + self.machine_creds, + self.machine_creds_bad, + self.last_lsarpc_named_bad) + + self.lsarpc_connections_named.append(c) + return c + + def get_drsuapi_connection_pair(self, new=False, unbind=False): + """get a (drs, drs_handle) tuple""" + if self.drsuapi_connections and not new: + c = self.drsuapi_connections[-1] + return c + + def connect(creds): + binding_options = 'seal' + binding_string = "ncacn_ip_tcp:%s[%s]" %\ + (self.server, binding_options) + return drsuapi.drsuapi(binding_string, self.lp, creds) + + (drs, self.last_drsuapi_bad) = \ + self.with_random_bad_credentials(connect, + self.user_creds, + self.user_creds_bad, + self.last_drsuapi_bad) + + (drs_handle, supported_extensions) = drs_DsBind(drs) + c = (drs, drs_handle) + self.drsuapi_connections.append(c) + return c + + def get_ldap_connection(self, new=False, simple=False): + if self.ldap_connections and not new: + return self.ldap_connections[-1] + + def simple_bind(creds): + """ + To run simple bind against Windows, we need to run + following commands in PowerShell: + + Install-windowsfeature ADCS-Cert-Authority + Install-AdcsCertificationAuthority -CAType EnterpriseRootCA + Restart-Computer + + """ + return SamDB('ldaps://%s' % self.server, + credentials=creds, + lp=self.lp) + + def sasl_bind(creds): + return SamDB('ldap://%s' % self.server, + credentials=creds, + lp=self.lp) + if simple: + (samdb, self.last_simple_bind_bad) = \ + self.with_random_bad_credentials(simple_bind, + self.simple_bind_creds, + self.simple_bind_creds_bad, + self.last_simple_bind_bad) + else: + (samdb, self.last_bind_bad) = \ + self.with_random_bad_credentials(sasl_bind, + self.user_creds, + self.user_creds_bad, + self.last_bind_bad) + + self.ldap_connections.append(samdb) + return samdb + + def get_samr_context(self, new=False): + if not self.samr_contexts or new: + self.samr_contexts.append( + SamrContext(self.server, lp=self.lp, creds=self.creds)) + return 
self.samr_contexts[-1]
+
+    def get_netlogon_connection(self):
+
+        if self.netlogon_connection:
+            return self.netlogon_connection
+
+        def connect(creds):
+            return netlogon.netlogon("ncacn_ip_tcp:%s[schannel,seal]" %
+                                     (self.server),
+                                     self.lp,
+                                     creds)
+        (c, self.last_netlogon_bad) = \
+            self.with_random_bad_credentials(connect,
+                                             self.machine_creds,
+                                             self.machine_creds_bad,
+                                             self.last_netlogon_bad)
+        self.netlogon_connection = c
+        return c
+
+    def guess_a_dns_lookup(self):
+        return (self.realm, 'A')
+
+    def get_authenticator(self):
+        auth = self.machine_creds.new_client_authenticator()
+        current = netr_Authenticator()
+        current.cred.data = [x if isinstance(x, int) else ord(x)
+                             for x in auth["credential"]]
+        current.timestamp = auth["timestamp"]
+
+        subsequent = netr_Authenticator()
+        return (current, subsequent)
+
+    def write_stats(self, filename, **kwargs):
+        """Write arbitrary key/value pairs to a file in our stats directory in
+        order for them to be picked up later by another process working out
+        statistics."""
+        filename = os.path.join(self.statsdir, filename)
+        f = open(filename, 'w')
+        for k, v in kwargs.items():
+            print("%s: %s" % (k, v), file=f)
+        f.close()
+
+
+class SamrContext(object):
+    """State/Context associated with a samr connection.
+    """
+    def __init__(self, server, lp=None, creds=None):
+        self.connection = None
+        self.handle = None
+        self.domain_handle = None
+        self.domain_sid = None
+        self.group_handle = None
+        self.user_handle = None
+        self.rids = None
+        self.server = server
+        self.lp = lp
+        self.creds = creds
+
+    def get_connection(self):
+        if not self.connection:
+            self.connection = samr.samr(
+                "ncacn_ip_tcp:%s[seal]" % (self.server),
+                lp_ctx=self.lp,
+                credentials=self.creds)
+
+        return self.connection
+
+    def get_handle(self):
+        if not self.handle:
+            c = self.get_connection()
+            self.handle = c.Connect2(None, security.SEC_FLAG_MAXIMUM_ALLOWED)
+        return self.handle
+
+
+class Conversation(object):
+    """Details of a conversation between a simulated client and a server."""
+    def __init__(self, start_time=None, endpoints=None, seq=(),
+                 conversation_id=None):
+        self.start_time = start_time
+        self.endpoints = endpoints
+        self.packets = []
+        self.msg = random_colour_print(endpoints)
+        self.client_balance = 0.0
+        self.conversation_id = conversation_id
+        for p in seq:
+            self.add_short_packet(*p)
+
+    def __cmp__(self, other):
+        if self.start_time is None:
+            if other.start_time is None:
+                return 0
+            return -1
+        if other.start_time is None:
+            return 1
+        return self.start_time - other.start_time
+
+    def add_packet(self, packet):
+        """Add a packet object to this conversation, making a local copy with
+        a conversation-relative timestamp."""
+        p = packet.copy()
+
+        if self.start_time is None:
+            self.start_time = p.timestamp
+
+        if self.endpoints is None:
+            self.endpoints = p.endpoints
+
+        if p.endpoints != self.endpoints:
+            raise FakePacketError("Conversation endpoints %s don't match "
+                                  "packet endpoints %s" %
+                                  (self.endpoints, p.endpoints))
+
+        p.timestamp -= self.start_time
+
+        if p.src == p.endpoints[0]:
+            self.client_balance -= p.client_score()
+        else:
+            self.client_balance += p.client_score()
+
+        if p.is_really_a_packet():
+            self.packets.append(p)
+
+    def add_short_packet(self, timestamp, protocol, opcode, extra,
+                         client=True, skip_unused_packets=True):
+        """Create a packet from a timestamp, and 'protocol:opcode' pair, and a
+        (possibly empty) list of extra data. If client is True, assume
+        this packet is from the client to the server.
+        """
+        if skip_unused_packets and not is_a_real_packet(protocol, opcode):
+            return
+
+        src, dest = self.guess_client_server()
+        if not client:
+            src, dest = dest, src
+        key = (protocol, opcode)
+        desc = OP_DESCRIPTIONS.get(key, '')
+        ip_protocol = IP_PROTOCOLS.get(protocol, '06')
+        packet = Packet(timestamp - self.start_time, ip_protocol,
+                        '', src, dest,
+                        protocol, opcode, desc, extra)
+        # XXX we're assuming the timestamp is already adjusted for
+        # this conversation?
+        # XXX should we adjust client balance for guessed packets?
+        if packet.src == packet.endpoints[0]:
+            self.client_balance -= packet.client_score()
+        else:
+            self.client_balance += packet.client_score()
+        if packet.is_really_a_packet():
+            self.packets.append(packet)
+
+    def __str__(self):
+        return ("<Conversation %s %s starting %.3f, %d packets>" %
+                (self.conversation_id, self.endpoints, self.start_time,
+                 len(self.packets)))
+
+    __repr__ = __str__
+
+    def __iter__(self):
+        return iter(self.packets)
+
+    def __len__(self):
+        return len(self.packets)
+
+    def get_duration(self):
+        if len(self.packets) < 2:
+            return 0
+        return self.packets[-1].timestamp - self.packets[0].timestamp
+
+    def replay_as_summary_lines(self):
+        return [p.as_summary(self.start_time) for p in self.packets]
+
+    def replay_with_delay(self, start, context=None, account=None):
+        """Replay the conversation at the right time.
+        (We're already in a fork)."""
+        # first we sleep until the first packet
+        t = self.start_time
+        now = time.time() - start
+        gap = t - now
+        sleep_time = gap - SLEEP_OVERHEAD
+        if sleep_time > 0:
+            time.sleep(sleep_time)
+
+        miss = (time.time() - start) - t
+        self.msg("starting %s [miss %.3f]" % (self, miss))
+
+        max_gap = 0.0
+        max_sleep_miss = 0.0
+        # packet times are relative to conversation start
+        p_start = time.time()
+        for p in self.packets:
+            now = time.time() - p_start
+            gap = now - p.timestamp
+            if gap > max_gap:
+                max_gap = gap
+            if gap < 0:
+                sleep_time = -gap - SLEEP_OVERHEAD
+                if sleep_time > 0:
+                    time.sleep(sleep_time)
+                t = time.time() - p_start
+                if t - p.timestamp > max_sleep_miss:
+                    max_sleep_miss = t - p.timestamp
+
+            p.play(self, context)
+
+        return max_gap, miss, max_sleep_miss
+
+    def guess_client_server(self, server_clue=None):
+        """Have a go at deciding who is the server and who is the client.
+        Returns (client, server)
+        """
+        a, b = self.endpoints
+
+        if self.client_balance < 0:
+            return (a, b)
+
+        # in the absence of a clue, we will fall through to assuming
+        # the lowest number is the server (which is usually true).
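+        # (Editor's illustration: client_balance is accumulated in
+        # add_packet() from client_score(), so e.g. an ldap bind request
+        # seen coming from endpoint a drives the balance negative and the
+        # branch above reports a as the client.)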
+
+        if self.client_balance == 0 and server_clue == b:
+            return (a, b)
+
+        return (b, a)
+
+    def forget_packets_outside_window(self, s, e):
+        """Prune any packets outside the time window we're interested in
+
+        :param s: start of the window
+        :param e: end of the window
+        """
+        self.packets = [p for p in self.packets if s <= p.timestamp <= e]
+        self.start_time = self.packets[0].timestamp if self.packets else None
+
+    def renormalise_times(self, start_time):
+        """Adjust the packet start times relative to the new start time."""
+        for p in self.packets:
+            p.timestamp -= start_time
+
+        if self.start_time is not None:
+            self.start_time -= start_time
+
+
+class DnsHammer(Conversation):
+    """A lightweight conversation that generates a lot of dns:0 packets on
+    the fly"""
+
+    def __init__(self, dns_rate, duration, query_file=None):
+        n = int(dns_rate * duration)
+        self.times = [random.uniform(0, duration) for i in range(n)]
+        self.times.sort()
+        self.rate = dns_rate
+        self.duration = duration
+        self.start_time = 0
+        self.query_choices = self._get_query_choices(query_file=query_file)
+
+    def __str__(self):
+        return ("<DnsHammer %d packets over %.1fs (rate %.2f)>" %
+                (len(self.times), self.duration, self.rate))
+
+    def _get_query_choices(self, query_file=None):
+        """
+        Read dns query choices from a file, or return the defaults.
+
+        rname may contain a format string like `{realm}`;
+        realm can be fetched from context.realm
+        """
+
+        if query_file:
+            with open(query_file, 'r') as f:
+                text = f.read()
+            choices = []
+            for line in text.splitlines():
+                line = line.strip()
+                if line and not line.startswith('#'):
+                    args = line.split(',')
+                    assert len(args) == 4
+                    choices.append(args)
+            return choices
+        else:
+            return [
+                (0, '{realm}', 'A', 'yes'),
+                (1, '{realm}', 'NS', 'yes'),
+                (2, '*.{realm}', 'A', 'no'),
+                (3, '*.{realm}', 'NS', 'no'),
+                (10, '_msdcs.{realm}', 'A', 'yes'),
+                (11, '_msdcs.{realm}', 'NS', 'yes'),
+                (20, 'nx.realm.com', 'A', 'no'),
+                (21, 'nx.realm.com', 'NS', 'no'),
+                (22, '*.nx.realm.com', 'A', 'no'),
+                (23, '*.nx.realm.com', 'NS', 'no'),
+            ]
+
+    def replay(self, context=None):
+        assert context
+        assert context.realm
+        start = time.time()
+        for t in self.times:
+            now = time.time() - start
+            gap = t - now
+            sleep_time = gap - SLEEP_OVERHEAD
+            if sleep_time > 0:
+                time.sleep(sleep_time)
+
+            opcode, rname, rtype, exist = random.choice(self.query_choices)
+            rname = rname.format(realm=context.realm)
+            success = True
+            packet_start = time.time()
+            try:
+                answers = dns_query(rname, rtype)
+                if exist == 'yes' and not len(answers):
+                    # expected answers but got none: fail
+                    success = False
+            except Exception:
+                success = False
+            finally:
+                end = time.time()
+                duration = end - packet_start
+                print("%f\tDNS\tdns\t%s\t%f\t%s\t" % (end, opcode, duration, success))
+
+
+def ingest_summaries(files, dns_mode='count'):
+    """Load a traffic summary file and generate Conversations from it.
+ """ + + dns_counts = defaultdict(int) + packets = [] + for f in files: + if isinstance(f, str): + f = open(f) + print("Ingesting %s" % (f.name,), file=sys.stderr) + for line in f: + p = Packet.from_line(line) + if p.protocol == 'dns' and dns_mode != 'include': + dns_counts[p.opcode] += 1 + else: + packets.append(p) + + f.close() + + if not packets: + return [], 0 + + start_time = min(p.timestamp for p in packets) + last_packet = max(p.timestamp for p in packets) + + print("gathering packets into conversations", file=sys.stderr) + conversations = OrderedDict() + for i, p in enumerate(packets): + p.timestamp -= start_time + c = conversations.get(p.endpoints) + if c is None: + c = Conversation(conversation_id=(i + 2)) + conversations[p.endpoints] = c + c.add_packet(p) + + # We only care about conversations with actual traffic, so we + # filter out conversations with nothing to say. We do that here, + # rather than earlier, because those empty packets contain useful + # hints as to which end of the conversation was the client. + conversation_list = [] + for c in conversations.values(): + if len(c) != 0: + conversation_list.append(c) + + # This is obviously not correct, as many conversations will appear + # to start roughly simultaneously at the beginning of the snapshot. + # To which we say: oh well, so be it. + duration = float(last_packet - start_time) + mean_interval = len(conversations) / duration + + return conversation_list, mean_interval, duration, dns_counts + + +def guess_server_address(conversations): + # we guess the most common address. + addresses = Counter() + for c in conversations: + addresses.update(c.endpoints) + if addresses: + return addresses.most_common(1)[0] + + +def stringify_keys(x): + y = {} + for k, v in x.items(): + k2 = '\t'.join(k) + y[k2] = v + return y + + +def unstringify_keys(x): + y = {} + for k, v in x.items(): + t = tuple(str(k).split('\t')) + y[t] = v + return y + + +class TrafficModel(object): + def __init__(self, n=3): + self.ngrams = {} + self.query_details = {} + self.n = n + self.dns_opcounts = defaultdict(int) + self.cumulative_duration = 0.0 + self.packet_rate = [0, 1] + + def learn(self, conversations, dns_opcounts=None): + if dns_opcounts is None: + dns_opcounts = {} + prev = 0.0 + cum_duration = 0.0 + key = (NON_PACKET,) * (self.n - 1) + + server = guess_server_address(conversations) + + for k, v in dns_opcounts.items(): + self.dns_opcounts[k] += v + + if len(conversations) > 1: + first = conversations[0].start_time + total = 0 + last = first + 0.1 + for c in conversations: + total += len(c) + last = max(last, c.packets[-1].timestamp) + + self.packet_rate[0] = total + self.packet_rate[1] = last - first + + for c in conversations: + client, server = c.guess_client_server(server) + cum_duration += c.get_duration() + key = (NON_PACKET,) * (self.n - 1) + for p in c: + if p.src != client: + continue + + elapsed = p.timestamp - prev + prev = p.timestamp + if elapsed > WAIT_THRESHOLD: + # add the wait as an extra state + wait = 'wait:%d' % (math.log(max(1.0, + elapsed * WAIT_SCALE))) + self.ngrams.setdefault(key, []).append(wait) + key = key[1:] + (wait,) + + short_p = p.as_packet_type() + self.query_details.setdefault(short_p, + []).append(tuple(p.extra)) + self.ngrams.setdefault(key, []).append(short_p) + key = key[1:] + (short_p,) + + self.cumulative_duration += cum_duration + # add in the end + self.ngrams.setdefault(key, []).append(NON_PACKET) + + def save(self, f): + ngrams = {} + for k, v in self.ngrams.items(): + k = '\t'.join(k) + ngrams[k] = 
dict(Counter(v)) + + query_details = {} + for k, v in self.query_details.items(): + query_details[k] = dict(Counter('\t'.join(x) if x else '-' + for x in v)) + + d = { + 'ngrams': ngrams, + 'query_details': query_details, + 'cumulative_duration': self.cumulative_duration, + 'packet_rate': self.packet_rate, + 'version': CURRENT_MODEL_VERSION + } + d['dns'] = self.dns_opcounts + + if isinstance(f, str): + f = open(f, 'w') + + json.dump(d, f, indent=2) + + def load(self, f): + if isinstance(f, str): + f = open(f) + + d = json.load(f) + + try: + version = d["version"] + if version < REQUIRED_MODEL_VERSION: + raise ValueError("the model file is version %d; " + "version %d is required" % + (version, REQUIRED_MODEL_VERSION)) + except KeyError: + raise ValueError("the model file lacks a version number; " + "version %d is required" % + (REQUIRED_MODEL_VERSION)) + + for k, v in d['ngrams'].items(): + k = tuple(str(k).split('\t')) + values = self.ngrams.setdefault(k, []) + for p, count in v.items(): + values.extend([str(p)] * count) + values.sort() + + for k, v in d['query_details'].items(): + values = self.query_details.setdefault(str(k), []) + for p, count in v.items(): + if p == '-': + values.extend([()] * count) + else: + values.extend([tuple(str(p).split('\t'))] * count) + values.sort() + + if 'dns' in d: + for k, v in d['dns'].items(): + self.dns_opcounts[k] += v + + self.cumulative_duration = d['cumulative_duration'] + self.packet_rate = d['packet_rate'] + + def construct_conversation_sequence(self, timestamp=0.0, + hard_stop=None, + replay_speed=1, + ignore_before=0, + persistence=0): + """Construct an individual conversation packet sequence from the + model. + """ + c = [] + key = (NON_PACKET,) * (self.n - 1) + if ignore_before is None: + ignore_before = timestamp - 1 + + while True: + p = random.choice(self.ngrams.get(key, (NON_PACKET,))) + if p == NON_PACKET: + if timestamp < ignore_before: + break + if random.random() > persistence: + print("ending after %s (persistence %.1f)" % (key, persistence), + file=sys.stderr) + break + + p = 'wait:%d' % random.randrange(5, 12) + print("trying %s instead of end" % p, file=sys.stderr) + + if p in self.query_details: + extra = random.choice(self.query_details[p]) + else: + extra = [] + + protocol, opcode = p.split(':', 1) + if protocol == 'wait': + log_wait_time = int(opcode) + random.random() + wait = math.exp(log_wait_time) / (WAIT_SCALE * replay_speed) + timestamp += wait + else: + log_wait = random.uniform(*NO_WAIT_LOG_TIME_RANGE) + wait = math.exp(log_wait) / replay_speed + timestamp += wait + if hard_stop is not None and timestamp > hard_stop: + break + if timestamp >= ignore_before: + c.append((timestamp, protocol, opcode, extra)) + + key = key[1:] + (p,) + if key[-2][:5] == 'wait:' and key[-1][:5] == 'wait:': + # two waits in a row can only be caused by "persistence" + # tricks, and will not result in any packets being found. + # Instead we pretend this is a fresh start. + key = (NON_PACKET,) * (self.n - 1) + + return c + + def scale_to_packet_rate(self, scale): + rate_n, rate_t = self.packet_rate + return scale * rate_n / rate_t + + def packet_rate_to_scale(self, pps): + rate_n, rate_t = self.packet_rate + return pps * rate_t / rate_n + + def generate_conversation_sequences(self, packet_rate, duration, replay_speed=1, + persistence=0): + """Generate a list of conversation descriptions from the model.""" + + # We run the simulation for ten times as long as our desired + # duration, and take the section at the end. 
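+        # (Illustrative numbers, not taken from any capture: with
+        # duration=100 the simulated window is [-900, 100); conversations
+        # may start anywhere in it, but the hard_stop and ignore_before
+        # arguments below keep only packets that land in [0, 100).)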
+ lead_in = 9 * duration + target_packets = int(packet_rate * duration) + conversations = [] + n_packets = 0 + + while n_packets < target_packets: + start = random.uniform(-lead_in, duration) + c = self.construct_conversation_sequence(start, + hard_stop=duration, + replay_speed=replay_speed, + ignore_before=0, + persistence=persistence) + # will these "packets" generate actual traffic? + # some (e.g. ldap unbind) will not generate anything + # if the previous packets are not there, and if the + # conversation only has those it wastes a process doing nothing. + for timestamp, protocol, opcode, extra in c: + if is_a_traffic_generating_packet(protocol, opcode): + break + else: + continue + + conversations.append(c) + n_packets += len(c) + + scale = self.packet_rate_to_scale(packet_rate) + print(("we have %d packets (target %d) in %d conversations at %.1f/s " + "(scale %f)" % (n_packets, target_packets, len(conversations), + packet_rate, scale)), + file=sys.stderr) + conversations.sort() # sorts by first element == start time + return conversations + + +def seq_to_conversations(seq, server=1, client=2): + conversations = [] + for s in seq: + if s: + c = Conversation(s[0][0], (server, client), s) + client += 1 + conversations.append(c) + return conversations + + +IP_PROTOCOLS = { + 'dns': '11', + 'rpc_netlogon': '06', + 'kerberos': '06', # ratio 16248:258 + 'smb': '06', + 'smb2': '06', + 'ldap': '06', + 'cldap': '11', + 'lsarpc': '06', + 'samr': '06', + 'dcerpc': '06', + 'epm': '06', + 'drsuapi': '06', + 'browser': '11', + 'smb_netlogon': '11', + 'srvsvc': '06', + 'nbns': '11', +} + +OP_DESCRIPTIONS = { + ('browser', '0x01'): 'Host Announcement (0x01)', + ('browser', '0x02'): 'Request Announcement (0x02)', + ('browser', '0x08'): 'Browser Election Request (0x08)', + ('browser', '0x09'): 'Get Backup List Request (0x09)', + ('browser', '0x0c'): 'Domain/Workgroup Announcement (0x0c)', + ('browser', '0x0f'): 'Local Master Announcement (0x0f)', + ('cldap', '3'): 'searchRequest', + ('cldap', '5'): 'searchResDone', + ('dcerpc', '0'): 'Request', + ('dcerpc', '11'): 'Bind', + ('dcerpc', '12'): 'Bind_ack', + ('dcerpc', '13'): 'Bind_nak', + ('dcerpc', '14'): 'Alter_context', + ('dcerpc', '15'): 'Alter_context_resp', + ('dcerpc', '16'): 'AUTH3', + ('dcerpc', '2'): 'Response', + ('dns', '0'): 'query', + ('dns', '1'): 'response', + ('drsuapi', '0'): 'DsBind', + ('drsuapi', '12'): 'DsCrackNames', + ('drsuapi', '13'): 'DsWriteAccountSpn', + ('drsuapi', '1'): 'DsUnbind', + ('drsuapi', '2'): 'DsReplicaSync', + ('drsuapi', '3'): 'DsGetNCChanges', + ('drsuapi', '4'): 'DsReplicaUpdateRefs', + ('epm', '3'): 'Map', + ('kerberos', ''): '', + ('ldap', '0'): 'bindRequest', + ('ldap', '1'): 'bindResponse', + ('ldap', '2'): 'unbindRequest', + ('ldap', '3'): 'searchRequest', + ('ldap', '4'): 'searchResEntry', + ('ldap', '5'): 'searchResDone', + ('ldap', ''): '*** Unknown ***', + ('lsarpc', '14'): 'lsa_LookupNames', + ('lsarpc', '15'): 'lsa_LookupSids', + ('lsarpc', '39'): 'lsa_QueryTrustedDomainInfoBySid', + ('lsarpc', '40'): 'lsa_SetTrustedDomainInfo', + ('lsarpc', '6'): 'lsa_OpenPolicy', + ('lsarpc', '76'): 'lsa_LookupSids3', + ('lsarpc', '77'): 'lsa_LookupNames4', + ('nbns', '0'): 'query', + ('nbns', '1'): 'response', + ('rpc_netlogon', '21'): 'NetrLogonDummyRoutine1', + ('rpc_netlogon', '26'): 'NetrServerAuthenticate3', + ('rpc_netlogon', '29'): 'NetrLogonGetDomainInfo', + ('rpc_netlogon', '30'): 'NetrServerPasswordSet2', + ('rpc_netlogon', '39'): 'NetrLogonSamLogonEx', + ('rpc_netlogon', '40'): 
'DsrEnumerateDomainTrusts', + ('rpc_netlogon', '45'): 'NetrLogonSamLogonWithFlags', + ('rpc_netlogon', '4'): 'NetrServerReqChallenge', + ('samr', '0',): 'Connect', + ('samr', '16'): 'GetAliasMembership', + ('samr', '17'): 'LookupNames', + ('samr', '18'): 'LookupRids', + ('samr', '19'): 'OpenGroup', + ('samr', '1'): 'Close', + ('samr', '25'): 'QueryGroupMember', + ('samr', '34'): 'OpenUser', + ('samr', '36'): 'QueryUserInfo', + ('samr', '39'): 'GetGroupsForUser', + ('samr', '3'): 'QuerySecurity', + ('samr', '5'): 'LookupDomain', + ('samr', '64'): 'Connect5', + ('samr', '6'): 'EnumDomains', + ('samr', '7'): 'OpenDomain', + ('samr', '8'): 'QueryDomainInfo', + ('smb', '0x04'): 'Close (0x04)', + ('smb', '0x24'): 'Locking AndX (0x24)', + ('smb', '0x2e'): 'Read AndX (0x2e)', + ('smb', '0x32'): 'Trans2 (0x32)', + ('smb', '0x71'): 'Tree Disconnect (0x71)', + ('smb', '0x72'): 'Negotiate Protocol (0x72)', + ('smb', '0x73'): 'Session Setup AndX (0x73)', + ('smb', '0x74'): 'Logoff AndX (0x74)', + ('smb', '0x75'): 'Tree Connect AndX (0x75)', + ('smb', '0xa2'): 'NT Create AndX (0xa2)', + ('smb2', '0'): 'NegotiateProtocol', + ('smb2', '11'): 'Ioctl', + ('smb2', '14'): 'Find', + ('smb2', '16'): 'GetInfo', + ('smb2', '18'): 'Break', + ('smb2', '1'): 'SessionSetup', + ('smb2', '2'): 'SessionLogoff', + ('smb2', '3'): 'TreeConnect', + ('smb2', '4'): 'TreeDisconnect', + ('smb2', '5'): 'Create', + ('smb2', '6'): 'Close', + ('smb2', '8'): 'Read', + ('smb_netlogon', '0x12'): 'SAM LOGON request from client (0x12)', + ('smb_netlogon', '0x17'): ('SAM Active Directory Response - ' + 'user unknown (0x17)'), + ('srvsvc', '16'): 'NetShareGetInfo', + ('srvsvc', '21'): 'NetSrvGetInfo', +} + + +def expand_short_packet(p, timestamp, src, dest, extra): + protocol, opcode = p.split(':', 1) + desc = OP_DESCRIPTIONS.get((protocol, opcode), '') + ip_protocol = IP_PROTOCOLS.get(protocol, '06') + + line = [timestamp, ip_protocol, '', src, dest, protocol, opcode, desc] + line.extend(extra) + return '\t'.join(line) + + +def flushing_signal_handler(signal, frame): + """Signal handler closes standard out and error. + + Triggered by a sigterm, ensures that the log messages are flushed + to disk and not lost. + """ + sys.stderr.close() + sys.stdout.close() + os._exit(0) + + +def replay_seq_in_fork(cs, start, context, account, client_id, server_id=1): + """Fork a new process and replay the conversation sequence.""" + # We will need to reseed the random number generator or all the + # clients will end up using the same sequence of random + # numbers. random.randint() is mixed in so the initial seed will + # have an effect here. + seed = client_id * 1000 + random.randint(0, 999) + + # flush our buffers so messages won't be written by both sides + sys.stdout.flush() + sys.stderr.flush() + pid = os.fork() + if pid != 0: + return pid + + # we must never return, or we'll end up running parts of the + # parent's clean-up code. So we work in a try...finally, and + # try to print any exceptions. 
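+    # (Hence the os._exit(status) at the end of the finally: block below,
+    # rather than a plain return or sys.exit().)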
+    try:
+        random.seed(seed)
+        endpoints = (server_id, client_id)
+        status = 0
+        t = cs[0][0]
+        c = Conversation(t, endpoints, seq=cs, conversation_id=client_id)
+        signal.signal(signal.SIGTERM, flushing_signal_handler)
+
+        context.generate_process_local_config(account, c)
+        sys.stdin.close()
+        os.close(0)
+        filename = os.path.join(context.statsdir, 'stats-conversation-%d' %
+                                c.conversation_id)
+        f = open(filename, 'w')
+        try:
+            sys.stdout.close()
+            os.close(1)
+        except IOError as e:
+            LOGGER.info("stdout closing failed with %s" % e)
+
+        sys.stdout = f
+        now = time.time() - start
+        gap = t - now
+        sleep_time = gap - SLEEP_OVERHEAD
+        if sleep_time > 0:
+            time.sleep(sleep_time)
+
+        max_lag, start_lag, max_sleep_miss = c.replay_with_delay(start=start,
+                                                                 context=context)
+        print("Maximum lag: %f" % max_lag)
+        print("Start lag: %f" % start_lag)
+        print("Max sleep miss: %f" % max_sleep_miss)
+
+    except Exception:
+        status = 1
+        print(("EXCEPTION in child PID %d, conversation %s" % (os.getpid(), c)),
+              file=sys.stderr)
+        traceback.print_exc(file=sys.stderr)
+        sys.stderr.flush()
+    finally:
+        sys.stderr.close()
+        sys.stdout.close()
+        os._exit(status)
+
+
+def dnshammer_in_fork(dns_rate, duration, context, query_file=None):
+    sys.stdout.flush()
+    sys.stderr.flush()
+    pid = os.fork()
+    if pid != 0:
+        return pid
+
+    sys.stdin.close()
+    os.close(0)
+
+    try:
+        sys.stdout.close()
+        os.close(1)
+    except IOError as e:
+        LOGGER.warning("stdout closing failed with %s" % e)
+    filename = os.path.join(context.statsdir, 'stats-dns')
+    sys.stdout = open(filename, 'w')
+
+    try:
+        status = 0
+        signal.signal(signal.SIGTERM, flushing_signal_handler)
+        hammer = DnsHammer(dns_rate, duration, query_file=query_file)
+        hammer.replay(context=context)
+    except Exception:
+        status = 1
+        print(("EXCEPTION in child PID %d, the DNS hammer" % (os.getpid())),
+              file=sys.stderr)
+        traceback.print_exc(file=sys.stderr)
+    finally:
+        sys.stderr.close()
+        sys.stdout.close()
+        os._exit(status)
+
+
+def replay(conversation_seq,
+           host=None,
+           creds=None,
+           lp=None,
+           accounts=None,
+           dns_rate=0,
+           dns_query_file=None,
+           duration=None,
+           latency_timeout=1.0,
+           stop_on_any_error=False,
+           **kwargs):
+
+    context = ReplayContext(server=host,
+                            creds=creds,
+                            lp=lp,
+                            total_conversations=len(conversation_seq),
+                            **kwargs)
+
+    if len(accounts) < len(conversation_seq):
+        raise ValueError(("we have %d accounts but %d conversations" %
+                          (len(accounts), len(conversation_seq))))
+
+    # Set the process group so that the calling scripts are not killed
+    # when the forked child processes are killed.
+    os.setpgrp()
+
+    # we delay the start by a bit to allow all the forks to get up and
+    # running.
+    delay = len(conversation_seq) * 0.02
+    start = time.time() + delay
+
+    if duration is None:
+        # end slightly after the last packet of the last conversation
+        # to start. Conversations other than the last could still be
+        # going, but we don't care.
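+        # (conversation_seq is sorted by start time, and each element is a
+        # list of (timestamp, protocol, opcode, extra) tuples, so
+        # conversation_seq[-1][-1][0] is the timestamp of the final packet
+        # of the last-starting conversation.)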
+ duration = conversation_seq[-1][-1][0] + latency_timeout + + print("We will start in %.1f seconds" % delay, + file=sys.stderr) + print("We will stop after %.1f seconds" % (duration + delay), + file=sys.stderr) + print("runtime %.1f seconds" % duration, + file=sys.stderr) + + # give one second grace for packets to finish before killing begins + end = start + duration + 1.0 + + LOGGER.info("Replaying traffic for %u conversations over %d seconds" + % (len(conversation_seq), duration)) + + context.write_stats('intentions', + Planned_conversations=len(conversation_seq), + Planned_packets=sum(len(x) for x in conversation_seq)) + + children = {} + try: + if dns_rate: + pid = dnshammer_in_fork(dns_rate, duration, context, + query_file=dns_query_file) + children[pid] = 1 + + for i, cs in enumerate(conversation_seq): + account = accounts[i] + client_id = i + 2 + pid = replay_seq_in_fork(cs, start, context, account, client_id) + children[pid] = client_id + + # HERE, we are past all the forks + t = time.time() + print("all forks done in %.1f seconds, waiting %.1f" % + (t - start + delay, t - start), + file=sys.stderr) + + while time.time() < end and children: + time.sleep(0.003) + try: + pid, status = os.waitpid(-1, os.WNOHANG) + except OSError as e: + if e.errno != ECHILD: # no child processes + raise + break + if pid: + c = children.pop(pid, None) + if DEBUG_LEVEL > 0: + print(("process %d finished conversation %d;" + " %d to go" % + (pid, c, len(children))), file=sys.stderr) + if stop_on_any_error and status != 0: + break + + except Exception: + print("EXCEPTION in parent", file=sys.stderr) + traceback.print_exc() + finally: + context.write_stats('unfinished', + Unfinished_conversations=len(children)) + + for s in (15, 15, 9): + print(("killing %d children with -%d" % + (len(children), s)), file=sys.stderr) + for pid in children: + try: + os.kill(pid, s) + except OSError as e: + if e.errno != ESRCH: # don't fail if it has already died + raise + time.sleep(0.5) + end = time.time() + 1 + while children: + try: + pid, status = os.waitpid(-1, os.WNOHANG) + except OSError as e: + if e.errno != ECHILD: + raise + if pid != 0: + c = children.pop(pid, None) + if c is None: + print("children is %s, no pid found" % children) + sys.stderr.flush() + sys.stdout.flush() + os._exit(1) + print(("kill -%d %d KILLED conversation; " + "%d to go" % + (s, pid, len(children))), + file=sys.stderr) + if time.time() >= end: + break + + if not children: + break + time.sleep(1) + + if children: + print("%d children are missing" % len(children), + file=sys.stderr) + + # there may be stragglers that were forked just as ^C was hit + # and don't appear in the list of children. We can get them + # with killpg, but that will also kill us, so this is^H^H would be + # goodbye, except we cheat and pretend to use ^C (SIG_INTERRUPT), + # so as not to have to fuss around writing signal handlers. + try: + os.killpg(0, 2) + except KeyboardInterrupt: + print("ignoring fake ^C", file=sys.stderr) + + +def openLdb(host, creds, lp): + session = system_session() + ldb = SamDB(url="ldap://%s" % host, + session_info=session, + options=['modules:paged_searches'], + credentials=creds, + lp=lp) + return ldb + + +def ou_name(ldb, instance_id): + """Generate an ou name from the instance id""" + return "ou=instance-%d,ou=traffic_replay,%s" % (instance_id, + ldb.domain_dn()) + + +def create_ou(ldb, instance_id): + """Create an ou, all created user and machine accounts will belong to it. + + This allows all the created resources to be cleaned up easily. 
+ """ + ou = ou_name(ldb, instance_id) + try: + ldb.add({"dn": ou.split(',', 1)[1], + "objectclass": "organizationalunit"}) + except LdbError as e: + (status, _) = e.args + # ignore already exists + if status != 68: + raise + try: + ldb.add({"dn": ou, + "objectclass": "organizationalunit"}) + except LdbError as e: + (status, _) = e.args + # ignore already exists + if status != 68: + raise + return ou + + +# ConversationAccounts holds details of the machine and user accounts +# associated with a conversation. +# +# We use a named tuple to reduce shared memory usage. +ConversationAccounts = namedtuple('ConversationAccounts', + ('netbios_name', + 'machinepass', + 'username', + 'userpass')) + + +def generate_replay_accounts(ldb, instance_id, number, password): + """Generate a series of unique machine and user account names.""" + + accounts = [] + for i in range(1, number + 1): + netbios_name = machine_name(instance_id, i) + username = user_name(instance_id, i) + + account = ConversationAccounts(netbios_name, password, username, + password) + accounts.append(account) + return accounts + + +def create_machine_account(ldb, instance_id, netbios_name, machinepass, + traffic_account=True): + """Create a machine account via ldap.""" + + ou = ou_name(ldb, instance_id) + dn = "cn=%s,%s" % (netbios_name, ou) + utf16pw = ('"%s"' % get_string(machinepass)).encode('utf-16-le') + + if traffic_account: + # we set these bits for the machine account otherwise the replayed + # traffic throws up NT_STATUS_NO_TRUST_SAM_ACCOUNT errors + account_controls = str(UF_TRUSTED_FOR_DELEGATION | + UF_SERVER_TRUST_ACCOUNT) + + else: + account_controls = str(UF_WORKSTATION_TRUST_ACCOUNT) + + ldb.add({ + "dn": dn, + "objectclass": "computer", + "sAMAccountName": "%s$" % netbios_name, + "userAccountControl": account_controls, + "unicodePwd": utf16pw}) + + +def create_user_account(ldb, instance_id, username, userpass): + """Create a user account via ldap.""" + ou = ou_name(ldb, instance_id) + user_dn = "cn=%s,%s" % (username, ou) + utf16pw = ('"%s"' % get_string(userpass)).encode('utf-16-le') + ldb.add({ + "dn": user_dn, + "objectclass": "user", + "sAMAccountName": username, + "userAccountControl": str(UF_NORMAL_ACCOUNT), + "unicodePwd": utf16pw + }) + + # grant user write permission to do things like write account SPN + sdutils = sd_utils.SDUtils(ldb) + sdutils.dacl_add_ace(user_dn, "(A;;WP;;;PS)") + + +def create_group(ldb, instance_id, name): + """Create a group via ldap.""" + + ou = ou_name(ldb, instance_id) + dn = "cn=%s,%s" % (name, ou) + ldb.add({ + "dn": dn, + "objectclass": "group", + "sAMAccountName": name, + }) + + +def user_name(instance_id, i): + """Generate a user name based in the instance id""" + return "STGU-%d-%d" % (instance_id, i) + + +def search_objectclass(ldb, objectclass='user', attr='sAMAccountName'): + """Search objectclass, return attr in a set""" + objs = ldb.search( + expression="(objectClass={})".format(objectclass), + attrs=[attr] + ) + return {str(obj[attr]) for obj in objs} + + +def generate_users(ldb, instance_id, number, password): + """Add users to the server""" + existing_objects = search_objectclass(ldb, objectclass='user') + users = 0 + for i in range(number, 0, -1): + name = user_name(instance_id, i) + if name not in existing_objects: + create_user_account(ldb, instance_id, name, password) + users += 1 + if users % 50 == 0: + LOGGER.info("Created %u/%u users" % (users, number)) + + return users + + +def machine_name(instance_id, i, traffic_account=True): + """Generate a machine account 
name from instance id.""" + if traffic_account: + # traffic accounts correspond to a given user, and use different + # userAccountControl flags to ensure packets get processed correctly + # by the DC + return "STGM-%d-%d" % (instance_id, i) + else: + # Otherwise we're just generating computer accounts to simulate a + # semi-realistic network. These use the default computer + # userAccountControl flags, so we use a different account name so that + # we don't try to use them when generating packets + return "PC-%d-%d" % (instance_id, i) + + +def generate_machine_accounts(ldb, instance_id, number, password, + traffic_account=True): + """Add machine accounts to the server""" + existing_objects = search_objectclass(ldb, objectclass='computer') + added = 0 + for i in range(number, 0, -1): + name = machine_name(instance_id, i, traffic_account) + if name + "$" not in existing_objects: + create_machine_account(ldb, instance_id, name, password, + traffic_account) + added += 1 + if added % 50 == 0: + LOGGER.info("Created %u/%u machine accounts" % (added, number)) + + return added + + +def group_name(instance_id, i): + """Generate a group name from instance id.""" + return "STGG-%d-%d" % (instance_id, i) + + +def generate_groups(ldb, instance_id, number): + """Create the required number of groups on the server.""" + existing_objects = search_objectclass(ldb, objectclass='group') + groups = 0 + for i in range(number, 0, -1): + name = group_name(instance_id, i) + if name not in existing_objects: + create_group(ldb, instance_id, name) + groups += 1 + if groups % 1000 == 0: + LOGGER.info("Created %u/%u groups" % (groups, number)) + + return groups + + +def clean_up_accounts(ldb, instance_id): + """Remove the created accounts and groups from the server.""" + ou = ou_name(ldb, instance_id) + try: + ldb.delete(ou, ["tree_delete:1"]) + except LdbError as e: + (status, _) = e.args + # ignore does not exist + if status != 32: + raise + + +def generate_users_and_groups(ldb, instance_id, password, + number_of_users, number_of_groups, + group_memberships, max_members, + machine_accounts, traffic_accounts=True): + """Generate the required users and groups, allocating the users to + those groups.""" + memberships_added = 0 + groups_added = 0 + computers_added = 0 + + create_ou(ldb, instance_id) + + LOGGER.info("Generating dummy user accounts") + users_added = generate_users(ldb, instance_id, number_of_users, password) + + LOGGER.info("Generating dummy machine accounts") + computers_added = generate_machine_accounts(ldb, instance_id, + machine_accounts, password, + traffic_accounts) + + if number_of_groups > 0: + LOGGER.info("Generating dummy groups") + groups_added = generate_groups(ldb, instance_id, number_of_groups) + + if group_memberships > 0: + LOGGER.info("Assigning users to groups") + assignments = GroupAssignments(number_of_groups, + groups_added, + number_of_users, + users_added, + group_memberships, + max_members) + LOGGER.info("Adding users to groups") + add_users_to_groups(ldb, instance_id, assignments) + memberships_added = assignments.total() + + if (groups_added > 0 and users_added == 0 and + number_of_groups != groups_added): + LOGGER.warning("The added groups will contain no members") + + LOGGER.info("Added %d users (%d machines), %d groups and %d memberships" % + (users_added, computers_added, groups_added, + memberships_added)) + + +class GroupAssignments(object): + def __init__(self, number_of_groups, groups_added, number_of_users, + users_added, group_memberships, max_members): + + self.count = 
0
+        self.generate_group_distribution(number_of_groups)
+        self.generate_user_distribution(number_of_users, group_memberships)
+        self.max_members = max_members
+        self.assignments = defaultdict(list)
+        self.assign_groups(number_of_groups, groups_added, number_of_users,
+                           users_added, group_memberships)
+
+    def cumulative_distribution(self, weights):
+        # make sure the probabilities conform to a cumulative distribution
+        # spread between 0.0 and 1.0. Dividing by the weighted total gives each
+        # probability a proportional share of 1.0. Higher probabilities get a
+        # bigger share, so are more likely to be picked. We use the cumulative
+        # value, so we can use random.random() as a simple index into the list
+        dist = []
+        total = sum(weights)
+        if total == 0:
+            return None
+
+        cumulative = 0.0
+        for probability in weights:
+            cumulative += probability
+            dist.append(cumulative / total)
+        return dist
+
+    def generate_user_distribution(self, num_users, num_memberships):
+        """Probability distribution of a user belonging to a group.
+        """
+        # Assign a weighted probability to each user. Use the Pareto
+        # Distribution so that some users are in a lot of groups, and the
+        # bulk of users are in only a few groups. If we're assigning a large
+        # number of group memberships, use a higher shape. This means slightly
+        # fewer outlying users that are in large numbers of groups. The aim is
+        # to have no users belonging to more than ~500 groups.
+        if num_memberships > 5000000:
+            shape = 3.0
+        elif num_memberships > 2000000:
+            shape = 2.5
+        elif num_memberships > 300000:
+            shape = 2.25
+        else:
+            shape = 1.75
+
+        weights = []
+        for x in range(1, num_users + 1):
+            p = random.paretovariate(shape)
+            weights.append(p)
+
+        # convert the weights to a cumulative distribution between 0.0 and 1.0
+        self.user_dist = self.cumulative_distribution(weights)
+
+    def generate_group_distribution(self, n):
+        """Probability distribution of a group containing a user."""
+
+        # Assign a weighted probability to each group. Probability decreases
+        # as the group-ID increases
+        weights = []
+        for x in range(1, n + 1):
+            p = 1 / (x**1.3)
+            weights.append(p)
+
+        # convert the weights to a cumulative distribution between 0.0 and 1.0
+        self.group_weights = weights
+        self.group_dist = self.cumulative_distribution(weights)
+
+    def generate_random_membership(self):
+        """Returns a randomly generated user-group membership"""
+
+        # the list items are cumulative distribution values between 0.0 and
+        # 1.0, which makes random() a handy way to index the list to get a
+        # weighted random user/group. (Here the user/group returned are
+        # zero-based array indexes)
+        user = bisect.bisect(self.user_dist, random.random())
+        group = bisect.bisect(self.group_dist, random.random())
+
+        return user, group
+
+    def users_in_group(self, group):
+        return self.assignments[group]
+
+    def get_groups(self):
+        return self.assignments.keys()
+
+    def cap_group_membership(self, group, max_members):
+        """Prevent the group's membership from exceeding the max specified"""
+        num_members = len(self.assignments[group])
+        if num_members >= max_members:
+            LOGGER.info("Group {0} has {1} members".format(group, num_members))
+
+            # remove this group and then recalculate the cumulative
+            # distribution, so this group is no longer selected
+            self.group_weights[group - 1] = 0
+            new_dist = self.cumulative_distribution(self.group_weights)
+            self.group_dist = new_dist
+
+    def add_assignment(self, user, group):
+        # the assignments are stored in a dictionary where key=group,
+        # value=list-of-users-in-group (indexing by group-ID allows us to
+        # optimize for DB membership writes)
+        if user not in self.assignments[group]:
+            self.assignments[group].append(user)
+            self.count += 1
+
+        # check if there's a cap on how big the groups can grow
+        if self.max_members:
+            self.cap_group_membership(group, self.max_members)
+
+    def assign_groups(self, number_of_groups, groups_added,
+                      number_of_users, users_added, group_memberships):
+        """Allocate users to groups.
+
+        The intention is to have a few users that belong to most groups, while
+        the majority of users belong to a few groups.
+
+        A few groups will contain most users, with the remaining only having a
+        few users.
+        """
+
+        if group_memberships <= 0:
+            return
+
+        # Calculate the number of group memberships required
+        group_memberships = math.ceil(
+            float(group_memberships) *
+            (float(users_added) / float(number_of_users)))
+
+        if self.max_members:
+            group_memberships = min(group_memberships,
+                                    self.max_members * number_of_groups)
+
+        existing_users = number_of_users - users_added - 1
+        existing_groups = number_of_groups - groups_added - 1
+        while self.total() < group_memberships:
+            user, group = self.generate_random_membership()
+
+            if group > existing_groups or user > existing_users:
+                # the + 1 converts the array index to the corresponding
+                # group or user number
+                self.add_assignment(user + 1, group + 1)
+
+    def total(self):
+        return self.count
+
+
+def add_users_to_groups(db, instance_id, assignments):
+    """Takes the assignments of users to groups and applies them to the DB."""
+
+    total = assignments.total()
+    count = 0
+    added = 0
+
+    for group in assignments.get_groups():
+        users_in_group = assignments.users_in_group(group)
+        if len(users_in_group) == 0:
+            continue
+
+        # Split up the users into chunks, so we write no more than 1K at a
+        # time.
(Minimizing the DB modifies is more efficient, but writing + # 10K+ users to a single group becomes inefficient memory-wise) + for chunk in range(0, len(users_in_group), 1000): + chunk_of_users = users_in_group[chunk:chunk + 1000] + add_group_members(db, instance_id, group, chunk_of_users) + + added += len(chunk_of_users) + count += 1 + if count % 50 == 0: + LOGGER.info("Added %u/%u memberships" % (added, total)) + +def add_group_members(db, instance_id, group, users_in_group): + """Adds the given users to group specified.""" + + ou = ou_name(db, instance_id) + + def build_dn(name): + return("cn=%s,%s" % (name, ou)) + + group_dn = build_dn(group_name(instance_id, group)) + m = ldb.Message() + m.dn = ldb.Dn(db, group_dn) + + for user in users_in_group: + user_dn = build_dn(user_name(instance_id, user)) + idx = "member-" + str(user) + m[idx] = ldb.MessageElement(user_dn, ldb.FLAG_MOD_ADD, "member") + + db.modify(m) + + +def generate_stats(statsdir, timing_file): + """Generate and print the summary stats for a run.""" + first = sys.float_info.max + last = 0 + successful = 0 + failed = 0 + latencies = {} + failures = Counter() + unique_conversations = set() + if timing_file is not None: + tw = timing_file.write + else: + def tw(x): + pass + + tw("time\tconv\tprotocol\ttype\tduration\tsuccessful\terror\n") + + float_values = { + 'Maximum lag': 0, + 'Start lag': 0, + 'Max sleep miss': 0, + } + int_values = { + 'Planned_conversations': 0, + 'Planned_packets': 0, + 'Unfinished_conversations': 0, + } + + for filename in os.listdir(statsdir): + path = os.path.join(statsdir, filename) + with open(path, 'r') as f: + for line in f: + try: + fields = line.rstrip('\n').split('\t') + conversation = fields[1] + protocol = fields[2] + packet_type = fields[3] + latency = float(fields[4]) + t = float(fields[0]) + first = min(t - latency, first) + last = max(t, last) + + op = (protocol, packet_type) + latencies.setdefault(op, []).append(latency) + if fields[5] == 'True': + successful += 1 + else: + failed += 1 + failures[op] += 1 + + unique_conversations.add(conversation) + + tw(line) + except (ValueError, IndexError): + if ':' in line: + k, v = line.split(':', 1) + if k in float_values: + float_values[k] = max(float(v), + float_values[k]) + elif k in int_values: + int_values[k] = max(int(v), + int_values[k]) + else: + print(line, file=sys.stderr) + else: + # not a valid line print and ignore + print(line, file=sys.stderr) + + duration = last - first + if successful == 0: + success_rate = 0 + else: + success_rate = successful / duration + if failed == 0: + failure_rate = 0 + else: + failure_rate = failed / duration + + conversations = len(unique_conversations) + + print("Total conversations: %10d" % conversations) + print("Successful operations: %10d (%.3f per second)" + % (successful, success_rate)) + print("Failed operations: %10d (%.3f per second)" + % (failed, failure_rate)) + + for k, v in sorted(float_values.items()): + print("%-28s %f" % (k.replace('_', ' ') + ':', v)) + for k, v in sorted(int_values.items()): + print("%-28s %d" % (k.replace('_', ' ') + ':', v)) + + print("Protocol Op Code Description " + " Count Failed Mean Median " + "95% Range Max") + + ops = {} + for proto, packet in latencies: + if proto not in ops: + ops[proto] = set() + ops[proto].add(packet) + protocols = sorted(ops.keys()) + + for protocol in protocols: + packet_types = sorted(ops[protocol], key=opcode_key) + for packet_type in packet_types: + op = (protocol, packet_type) + values = latencies[op] + values = sorted(values) + 
            count = len(values)
+                failed = failures[op]
+                mean = sum(values) / count
+                median = calc_percentile(values, 0.50)
+                percentile = calc_percentile(values, 0.95)
+                rng = values[-1] - values[0]
+                maxv = values[-1]
+                desc = OP_DESCRIPTIONS.get(op, '')
+                print("%-12s %4s %-35s %12d %12d %12.6f "
+                      "%12.6f %12.6f %12.6f %12.6f"
+                      % (protocol,
+                         packet_type,
+                         desc,
+                         count,
+                         failed,
+                         mean,
+                         median,
+                         percentile,
+                         rng,
+                         maxv))
+
+
+def opcode_key(v):
+    """Sort key for the operation code to ensure that it sorts numerically"""
+    try:
+        return "%03d" % int(v)
+    except ValueError:
+        return v
+
+
+def calc_percentile(values, percentile):
+    """Calculate the specified percentile from the list of values.
+
+    Assumes the list is sorted in ascending order.
+    """
+
+    if not values:
+        return 0
+    k = (len(values) - 1) * percentile
+    f = math.floor(k)
+    c = math.ceil(k)
+    if f == c:
+        return values[int(k)]
+    d0 = values[int(f)] * (c - k)
+    d1 = values[int(c)] * (k - f)
+    return d0 + d1
+
+
+def mk_masked_dir(*path):
+    """In a testenv we end up with 0777 directories that look an alarming
+    green colour with ls. Use umask to avoid that."""
+    # py3 os.mkdir can do this
+    d = os.path.join(*path)
+    mask = os.umask(0o077)
+    os.mkdir(d)
+    os.umask(mask)
+    return d
diff --git a/python/samba/emulate/traffic_packets.py b/python/samba/emulate/traffic_packets.py
new file mode 100644
index 0000000..95c7465
--- /dev/null
+++ b/python/samba/emulate/traffic_packets.py
@@ -0,0 +1,973 @@
+# Dispatch for various request types.
+#
+# Copyright (C) Catalyst IT Ltd. 2017
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program.  If not, see <http://www.gnu.org/licenses/>.
+# +import os +import ctypes +import random + +from samba.net import Net +from samba.dcerpc import security, drsuapi, nbt, lsa, netlogon, ntlmssp +from samba.dcerpc.netlogon import netr_WorkstationInformation +from samba.dcerpc.security import dom_sid +from samba.netbios import Node +from samba.ndr import ndr_pack +from samba.credentials import ( + CLI_CRED_NTLMv2_AUTH, + MUST_USE_KERBEROS, + DONT_USE_KERBEROS +) +from samba import NTSTATUSError +from samba.ntstatus import ( + NT_STATUS_OBJECT_NAME_NOT_FOUND, + NT_STATUS_NO_SUCH_DOMAIN +) +import samba +import dns.resolver +from ldb import SCOPE_BASE + +def uint32(v): + return ctypes.c_uint32(v).value + + +def check_runtime_error(runtime, val): + if runtime is None: + return False + + err32 = uint32(runtime.args[0]) + if err32 == val: + return True + + return False + + +name_formats = [ + drsuapi.DRSUAPI_DS_NAME_FORMAT_FQDN_1779, + drsuapi.DRSUAPI_DS_NAME_FORMAT_NT4_ACCOUNT, + drsuapi.DRSUAPI_DS_NAME_FORMAT_DISPLAY, + drsuapi.DRSUAPI_DS_NAME_FORMAT_GUID, + drsuapi.DRSUAPI_DS_NAME_FORMAT_CANONICAL, + drsuapi.DRSUAPI_DS_NAME_FORMAT_USER_PRINCIPAL, + drsuapi.DRSUAPI_DS_NAME_FORMAT_CANONICAL_EX, + drsuapi.DRSUAPI_DS_NAME_FORMAT_SERVICE_PRINCIPAL, + drsuapi.DRSUAPI_DS_NAME_FORMAT_SID_OR_SID_HISTORY, + drsuapi.DRSUAPI_DS_NAME_FORMAT_DNS_DOMAIN, + drsuapi.DRSUAPI_DS_NAME_FORMAT_UPN_AND_ALTSECID, + drsuapi.DRSUAPI_DS_NAME_FORMAT_NT4_ACCOUNT_NAME_SANS_DOMAIN_EX, + drsuapi.DRSUAPI_DS_NAME_FORMAT_LIST_GLOBAL_CATALOG_SERVERS, + drsuapi.DRSUAPI_DS_NAME_FORMAT_UPN_FOR_LOGON, + drsuapi.DRSUAPI_DS_NAME_FORMAT_LIST_SERVERS_WITH_DCS_IN_SITE, + drsuapi.DRSUAPI_DS_NAME_FORMAT_STRING_SID_NAME, + drsuapi.DRSUAPI_DS_NAME_FORMAT_ALT_SECURITY_IDENTITIES_NAME, + drsuapi.DRSUAPI_DS_NAME_FORMAT_LIST_NCS, + drsuapi.DRSUAPI_DS_NAME_FORMAT_LIST_DOMAINS, + drsuapi.DRSUAPI_DS_NAME_FORMAT_MAP_SCHEMA_GUID, + drsuapi.DRSUAPI_DS_NAME_FORMAT_NT4_ACCOUNT_NAME_SANS_DOMAIN, + drsuapi.DRSUAPI_DS_NAME_FORMAT_LIST_ROLES, + drsuapi.DRSUAPI_DS_NAME_FORMAT_LIST_INFO_FOR_SERVER, + drsuapi.DRSUAPI_DS_NAME_FORMAT_LIST_SERVERS_FOR_DOMAIN_IN_SITE, + drsuapi.DRSUAPI_DS_NAME_FORMAT_LIST_DOMAINS_IN_SITE, + drsuapi.DRSUAPI_DS_NAME_FORMAT_LIST_SERVERS_IN_SITE, + drsuapi.DRSUAPI_DS_NAME_FORMAT_LIST_SITES, +] + + +def warning(message): + print("\033[37;41;1m" "Warning: %s" "\033[00m" % (message)) + +############################################################################### +# +# Packet generation functions: +# +# All the packet generation functions have the following form: +# packet_${protocol}_${opcode}(packet, conversation, context) +# +# The functions return true, if statistics should be collected for the packet +# false, the packet has been ignored. +# +# Where: +# protocol is the protocol, i.e. cldap, dcerpc, ... +# opcode is the protocol op code i.e. type of the packet to be +# generated. +# +# packet contains data about the captured/generated packet +# provides any extra data needed to generate the packet +# +# conversation Details of the current client/server interaction +# +# context state data for the current interaction +# +# +# +# The following protocols are not currently handled: +# smb +# smb2 +# browser +# smb_netlogon +# +# The following drsuapi replication packets are currently ignored: +# DsReplicaSync +# DsGetNCChanges +# DsReplicaUpdateRefs + + +# Packet generators that do NOTHING are assigned to the null_packet +# function which allows the conversation generators to notice this and +# avoid a whole lot of pointless work. 
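+# As a rough illustration of how this naming convention can be consumed
+# (the real dispatch happens on the traffic.py side when a packet is
+# played; this helper is a hypothetical sketch, not part of the module's
+# API):
+#
+#     import samba.emulate.traffic_packets as traffic_packets
+#
+#     def dispatch(packet, conversation, context):
+#         fn_name = 'packet_%s_%s' % (packet.protocol, packet.opcode)
+#         fn = getattr(traffic_packets, fn_name, null_packet)
+#         return fn(packet, conversation, context)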
+def null_packet(packet, conversation, context):
+    return False
+
+
+def packet_cldap_3(packet, conversation, context):
+    # searchRequest
+    net = Net(creds=context.creds, lp=context.lp)
+    net.finddc(domain=context.lp.get('realm'),
+               flags=(nbt.NBT_SERVER_LDAP |
+                      nbt.NBT_SERVER_DS |
+                      nbt.NBT_SERVER_WRITABLE))
+    return True
+
+
+packet_cldap_5 = null_packet
+# searchResDone
+
+packet_dcerpc_0 = null_packet
+# Request
+# Can be ignored, it's the continuation of an existing conversation
+
+packet_dcerpc_2 = null_packet
+# Response
+# Server response, so should be ignored
+
+packet_dcerpc_3 = null_packet
+
+packet_dcerpc_11 = null_packet
+# Bind
+# creation of the rpc dcerpc connection is managed by the higher level
+# protocol drivers. So we ignore it when generating traffic
+
+
+packet_dcerpc_12 = null_packet
+# Bind_ack
+# Server response, so should be ignored
+
+
+packet_dcerpc_13 = null_packet
+# Bind_nak
+# Server response, so should be ignored
+
+
+packet_dcerpc_14 = null_packet
+# Alter_context
+# Generated as part of the connect process
+
+
+def packet_dcerpc_15(packet, conversation, context):
+    # Alter_context_resp
+    # This means it was GSSAPI/krb5 (probably)
+    # Check the kerberos_state and issue a diagnostic if kerberos not enabled
+    if context.user_creds.get_kerberos_state() == DONT_USE_KERBEROS:
+        warning("Kerberos disabled but have dcerpc Alter_context_resp "
+                "indicating Kerberos was used")
+    return False
+
+
+def packet_dcerpc_16(packet, conversation, context):
+    # AUTH3
+    # This means it was NTLMSSP
+    # Check the kerberos_state and issue a diagnostic if kerberos enabled
+    if context.user_creds.get_kerberos_state() == MUST_USE_KERBEROS:
+        warning("Kerberos enabled but have dcerpc AUTH3 "
+                "indicating NTLMSSP was used")
+    return False
+
+
+def packet_dns_0(packet, conversation, context):
+    # query
+    name, rtype = context.guess_a_dns_lookup()
+    dns.resolver.query(name, rtype)
+    return True
+
+
+packet_dns_1 = null_packet
+# response
+# Server response, so should be ignored
+
+
+def packet_drsuapi_0(packet, conversation, context):
+    # DsBind
+    context.get_drsuapi_connection_pair(True)
+    return True
+
+
+NAME_FORMATS = [getattr(drsuapi, _x) for _x in dir(drsuapi)
+                if 'NAME_FORMAT' in _x]
+
+
+def packet_drsuapi_12(packet, conversation, context):
+    # DsCrackNames
+    drs, handle = context.get_drsuapi_connection_pair()
+
+    names = drsuapi.DsNameString()
+    names.str = context.server
+
+    req = drsuapi.DsNameRequest1()
+    req.format_flags = 0
+    req.format_offered = 7
+    req.format_desired = random.choice(name_formats)
+    req.codepage = 1252
+    req.language = 1033  # LCID 1033 is English (US)
+    req.format_flags = 0
+    req.count = 1
+    req.names = [names]
+
+    (result, ctr) = drs.DsCrackNames(handle, 1, req)
+    return True
+
+
+def packet_drsuapi_13(packet, conversation, context):
+    # DsWriteAccountSpn
+    req = drsuapi.DsWriteAccountSpnRequest1()
+    req.operation = drsuapi.DRSUAPI_DS_SPN_OPERATION_REPLACE
+    req.unknown1 = 0  # Unused, must be 0
+    req.object_dn = context.user_dn
+    req.count = 1  # only 1 name
+    spn_name = drsuapi.DsNameString()
+    spn_name.str = 'foo/{}'.format(context.username)
+    req.spn_names = [spn_name]
+    (drs, handle) = context.get_drsuapi_connection_pair()
+    (level, res) = drs.DsWriteAccountSpn(handle, 1, req)
+    return True
+
+
+def packet_drsuapi_1(packet, conversation, context):
+    # DsUnbind
+    (drs, handle) = context.get_drsuapi_connection_pair()
+    drs.DsUnbind(handle)
+    del context.drsuapi_connections[-1]
+    return True
+
+
+packet_drsuapi_2 = null_packet
+# DsReplicaSync
+# This is
between DCs, triggered on a DB change +# Ignoring for now + + +packet_drsuapi_3 = null_packet +# DsGetNCChanges +# This is between DCs, trigger with DB operation, +# or DsReplicaSync between DCs. +# Ignoring for now + + +packet_drsuapi_4 = null_packet +# DsReplicaUpdateRefs +# Ignoring for now + + +packet_epm_3 = null_packet +# Map +# Will be generated by higher level protocol calls + + +def packet_kerberos_(packet, conversation, context): + # Use the presence of kerberos packets as a hint to enable kerberos + # for the rest of the conversation. + # i.e. kerberos packets are not explicitly generated. + context.user_creds.set_kerberos_state(MUST_USE_KERBEROS) + context.user_creds_bad.set_kerberos_state(MUST_USE_KERBEROS) + context.machine_creds.set_kerberos_state(MUST_USE_KERBEROS) + context.machine_creds_bad.set_kerberos_state(MUST_USE_KERBEROS) + context.creds.set_kerberos_state(MUST_USE_KERBEROS) + return False + + +packet_ldap_ = null_packet +# Unknown +# The ldap payload was probably encrypted so just ignore it. + + +def packet_ldap_0(packet, conversation, context): + # bindRequest + if packet.extra[5] == "simple": + # Perform a simple bind. + context.get_ldap_connection(new=True, simple=True) + else: + # Perform a sasl bind. + context.get_ldap_connection(new=True, simple=False) + return True + + +packet_ldap_1 = null_packet +# bindResponse +# Server response ignored for traffic generation + + +def packet_ldap_2(packet, conversation, context): + # unbindRequest + # pop the last one off -- most likely we're in a bind/unbind ping. + del context.ldap_connections[-1:] + return False + + +def packet_ldap_3(packet, conversation, context): + # searchRequest + + (scope, dn_sig, filter, attrs, extra, desc, oid) = packet.extra + if not scope: + scope = SCOPE_BASE + + samdb = context.get_ldap_connection() + dn = context.get_matching_dn(dn_sig) + + # try to guess the search expression (don't bother for base searches, as + # they're only looking up a single object) + if (filter is None or filter == '') and scope != SCOPE_BASE: + filter = context.guess_search_filter(attrs, dn_sig, dn) + + samdb.search(dn, + expression=filter, + scope=int(scope), + attrs=attrs.split(','), + controls=["paged_results:1:1000"]) + return True + + +packet_ldap_4 = null_packet +# searchResEntry +# Server response ignored for traffic generation + + +packet_ldap_5 = null_packet +# Server response ignored for traffic generation + +packet_ldap_6 = null_packet + +packet_ldap_7 = null_packet + +packet_ldap_8 = null_packet + +packet_ldap_9 = null_packet + +packet_ldap_16 = null_packet + +packet_lsarpc_0 = null_packet +# lsarClose + +packet_lsarpc_1 = null_packet +# lsarDelete + +packet_lsarpc_2 = null_packet +# lsarEnumeratePrivileges + +packet_lsarpc_3 = null_packet +# LsarQuerySecurityObject + +packet_lsarpc_4 = null_packet +# LsarSetSecurityObject + +packet_lsarpc_5 = null_packet +# LsarChangePassword + +packet_lsarpc_6 = null_packet +# lsa_OpenPolicy +# We ignore this, but take it as a hint that the lsarpc handle should +# be over a named pipe. 
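+# (Hence the split in the generators below: calls that follow an
+# OpenPolicy use context.get_lsarpc_named_pipe_connection(), while the
+# LookupSids3/LookupNames4 variants use the plain
+# context.get_lsarpc_connection().)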
+# + + +def packet_lsarpc_14(packet, conversation, context): + # lsa_LookupNames + c = context.get_lsarpc_named_pipe_connection() + + objectAttr = lsa.ObjectAttribute() + pol_handle = c.OpenPolicy2(u'', objectAttr, + security.SEC_FLAG_MAXIMUM_ALLOWED) + + sids = lsa.TransSidArray() + names = [lsa.String("This Organization"), + lsa.String("Digest Authentication")] + level = lsa.LSA_LOOKUP_NAMES_ALL + count = 0 + c.LookupNames(pol_handle, names, sids, level, count) + return True + + +def packet_lsarpc_15(packet, conversation, context): + # lsa_LookupSids + c = context.get_lsarpc_named_pipe_connection() + + objectAttr = lsa.ObjectAttribute() + pol_handle = c.OpenPolicy2(u'', objectAttr, + security.SEC_FLAG_MAXIMUM_ALLOWED) + + sids = lsa.SidArray() + sid = lsa.SidPtr() + + x = dom_sid("S-1-5-7") + sid.sid = x + sids.sids = [sid] + sids.num_sids = 1 + names = lsa.TransNameArray() + level = lsa.LSA_LOOKUP_NAMES_ALL + count = 0 + + c.LookupSids(pol_handle, sids, names, level, count) + return True + + +def packet_lsarpc_39(packet, conversation, context): + # lsa_QueryTrustedDomainInfoBySid + # Samba does not support trusted domains, so this call is expected to fail + # + c = context.get_lsarpc_named_pipe_connection() + + objectAttr = lsa.ObjectAttribute() + + pol_handle = c.OpenPolicy2(u'', objectAttr, + security.SEC_FLAG_MAXIMUM_ALLOWED) + + domsid = security.dom_sid(context.domain_sid) + level = 1 + try: + c.QueryTrustedDomainInfoBySid(pol_handle, domsid, level) + except NTSTATUSError as error: + # Object Not found is the expected result from samba, + # while No Such Domain is the expected result from windows, + # anything else is a failure. + if not check_runtime_error(error, NT_STATUS_OBJECT_NAME_NOT_FOUND) \ + and not check_runtime_error(error, NT_STATUS_NO_SUCH_DOMAIN): + raise + return True + + +packet_lsarpc_40 = null_packet +# lsa_SetTrustedDomainInfo +# Not currently supported + + +packet_lsarpc_43 = null_packet +# LsaStorePrivateData + + +packet_lsarpc_44 = null_packet +# LsaRetrievePrivateData + + +packet_lsarpc_68 = null_packet +# LsarLookupNames3 + + +def packet_lsarpc_76(packet, conversation, context): + # lsa_LookupSids3 + c = context.get_lsarpc_connection() + sids = lsa.SidArray() + sid = lsa.SidPtr() + # Need a set + x = dom_sid("S-1-5-7") + sid.sid = x + sids.sids = [sid] + sids.num_sids = 1 + names = lsa.TransNameArray2() + level = lsa.LSA_LOOKUP_NAMES_ALL + count = 0 + lookup_options = lsa.LSA_LOOKUP_OPTION_SEARCH_ISOLATED_NAMES + client_revision = lsa.LSA_CLIENT_REVISION_2 + c.LookupSids3(sids, names, level, count, lookup_options, client_revision) + return True + + +def packet_lsarpc_77(packet, conversation, context): + # lsa_LookupNames4 + c = context.get_lsarpc_connection() + sids = lsa.TransSidArray3() + names = [lsa.String("This Organization"), + lsa.String("Digest Authentication")] + level = lsa.LSA_LOOKUP_NAMES_ALL + count = 0 + lookup_options = lsa.LSA_LOOKUP_OPTION_SEARCH_ISOLATED_NAMES + client_revision = lsa.LSA_CLIENT_REVISION_2 + c.LookupNames4(names, sids, level, count, lookup_options, client_revision) + return True + + +def packet_nbns_0(packet, conversation, context): + # query + n = Node() + try: + n.query_name("ANAME", context.server, timeout=4, broadcast=False) + except: + pass + return True + + +packet_nbns_1 = null_packet +# response +# Server response, not generated by the client + + +packet_rpc_netlogon_0 = null_packet + +packet_rpc_netlogon_1 = null_packet + +packet_rpc_netlogon_4 = null_packet +# NetrServerReqChallenge +# generated by higher level 
protocol drivers +# ignored for traffic generation + +packet_rpc_netlogon_14 = null_packet + +packet_rpc_netlogon_15 = null_packet + +packet_rpc_netlogon_21 = null_packet +# NetrLogonDummyRoutine1 +# Used to determine security settings. Triggered from schannel setup +# So no need for an explicit generator + + +packet_rpc_netlogon_26 = null_packet +# NetrServerAuthenticate3 +# Triggered from schannel set up, no need for an explicit generator + + +def packet_rpc_netlogon_29(packet, conversation, context): + # NetrLogonGetDomainInfo [531] + c = context.get_netlogon_connection() + (auth, succ) = context.get_authenticator() + query = netr_WorkstationInformation() + + c.netr_LogonGetDomainInfo(context.server, + context.netbios_name, + auth, + succ, + 2, # TODO are there other values? + query) + return True + + +def packet_rpc_netlogon_30(packet, conversation, context): + # NetrServerPasswordSet2 + c = context.get_netlogon_connection() + (auth, succ) = context.get_authenticator() + DATA_LEN = 512 + # Set the new password to the existing password, this generates the same + # work load as a new value, and leaves the account password intact for + # subsequent runs + newpass = context.machine_creds.get_password().encode('utf-16-le') + pwd_len = len(newpass) + filler = [x if isinstance(x, int) else ord(x) for x in os.urandom(DATA_LEN - pwd_len)] + pwd = netlogon.netr_CryptPassword() + pwd.length = pwd_len + pwd.data = filler + [x if isinstance(x, int) else ord(x) for x in newpass] + context.machine_creds.encrypt_netr_crypt_password(pwd) + c.netr_ServerPasswordSet2(context.server, + # must ends with $, so use get_username instead + # of get_workstation here + context.machine_creds.get_username(), + context.machine_creds.get_secure_channel_type(), + context.netbios_name, + auth, + pwd) + return True + + +packet_rpc_netlogon_34 = null_packet + + +def packet_rpc_netlogon_39(packet, conversation, context): + # NetrLogonSamLogonEx [4331] + def connect(creds): + c = context.get_netlogon_connection() + + # Disable Kerberos in cli creds to extract NTLM response + old_state = creds.get_kerberos_state() + creds.set_kerberos_state(DONT_USE_KERBEROS) + + logon = samlogon_logon_info(context.domain, + context.netbios_name, + creds) + logon_level = netlogon.NetlogonNetworkTransitiveInformation + validation_level = netlogon.NetlogonValidationSamInfo4 + netr_flags = 0 + c.netr_LogonSamLogonEx(context.server, + context.machine_creds.get_workstation(), + logon_level, + logon, + validation_level, + netr_flags) + + creds.set_kerberos_state(old_state) + + context.last_samlogon_bad =\ + context.with_random_bad_credentials(connect, + context.user_creds, + context.user_creds_bad, + context.last_samlogon_bad) + return True + + +def samlogon_target(domain_name, computer_name): + target_info = ntlmssp.AV_PAIR_LIST() + target_info.count = 3 + computername = ntlmssp.AV_PAIR() + computername.AvId = ntlmssp.MsvAvNbComputerName + computername.Value = computer_name + + domainname = ntlmssp.AV_PAIR() + domainname.AvId = ntlmssp.MsvAvNbDomainName + domainname.Value = domain_name + + eol = ntlmssp.AV_PAIR() + eol.AvId = ntlmssp.MsvAvEOL + target_info.pair = [domainname, computername, eol] + + return ndr_pack(target_info) + + +def samlogon_logon_info(domain_name, computer_name, creds): + + target_info_blob = samlogon_target(domain_name, computer_name) + + challenge = b"abcdefgh" + # User account under test + response = creds.get_ntlm_response(flags=CLI_CRED_NTLMv2_AUTH, + challenge=challenge, + target_info=target_info_blob) + + logon = 
netlogon.netr_NetworkInfo() + + logon.challenge = [x if isinstance(x, int) else ord(x) for x in challenge] + logon.nt = netlogon.netr_ChallengeResponse() + logon.nt.length = len(response["nt_response"]) + logon.nt.data = [x if isinstance(x, int) else ord(x) for x in response["nt_response"]] + + logon.identity_info = netlogon.netr_IdentityInfo() + + (username, domain) = creds.get_ntlm_username_domain() + logon.identity_info.domain_name.string = domain + logon.identity_info.account_name.string = username + logon.identity_info.workstation.string = creds.get_workstation() + + return logon + + +def packet_rpc_netlogon_40(packet, conversation, context): + # DsrEnumerateDomainTrusts + c = context.get_netlogon_connection() + c.netr_DsrEnumerateDomainTrusts( + context.server, + netlogon.NETR_TRUST_FLAG_IN_FOREST | + netlogon.NETR_TRUST_FLAG_OUTBOUND | + netlogon.NETR_TRUST_FLAG_INBOUND) + return True + + +def packet_rpc_netlogon_45(packet, conversation, context): + # NetrLogonSamLogonWithFlags [7] + def connect(creds): + c = context.get_netlogon_connection() + (auth, succ) = context.get_authenticator() + + # Disable Kerberos in cli creds to extract NTLM response + old_state = creds.get_kerberos_state() + creds.set_kerberos_state(DONT_USE_KERBEROS) + + logon = samlogon_logon_info(context.domain, + context.netbios_name, + creds) + logon_level = netlogon.NetlogonNetworkTransitiveInformation + validation_level = netlogon.NetlogonValidationSamInfo4 + netr_flags = 0 + c.netr_LogonSamLogonWithFlags(context.server, + context.machine_creds.get_workstation(), + auth, + succ, + logon_level, + logon, + validation_level, + netr_flags) + + creds.set_kerberos_state(old_state) + + context.last_samlogon_bad =\ + context.with_random_bad_credentials(connect, + context.user_creds, + context.user_creds_bad, + context.last_samlogon_bad) + return True + + +def packet_samr_0(packet, conversation, context): + # Open + c = context.get_samr_context() + c.get_handle() + return True + + +def packet_samr_1(packet, conversation, context): + # Close + c = context.get_samr_context() + s = c.get_connection() + # close the last opened handle, may not always be accurate + # but will do for load simulation + if c.user_handle is not None: + s.Close(c.user_handle) + c.user_handle = None + elif c.group_handle is not None: + s.Close(c.group_handle) + c.group_handle = None + elif c.domain_handle is not None: + s.Close(c.domain_handle) + c.domain_handle = None + c.rids = None + elif c.handle is not None: + s.Close(c.handle) + c.handle = None + c.domain_sid = None + return True + + +def packet_samr_3(packet, conversation, context): + # QuerySecurity + c = context.get_samr_context() + s = c.get_connection() + if c.user_handle is None: + packet_samr_34(packet, conversation, context) + s.QuerySecurity(c.user_handle, 1) + return True + + +def packet_samr_5(packet, conversation, context): + # LookupDomain + c = context.get_samr_context() + s = c.get_connection() + h = c.get_handle() + d = lsa.String() + d.string = context.domain + c.domain_sid = s.LookupDomain(h, d) + return True + + +def packet_samr_6(packet, conversation, context): + # EnumDomains + c = context.get_samr_context() + s = c.get_connection() + h = c.get_handle() + s.EnumDomains(h, 0, 0) + return True + + +def packet_samr_7(packet, conversation, context): + # OpenDomain + c = context.get_samr_context() + s = c.get_connection() + h = c.get_handle() + if c.domain_sid is None: + packet_samr_5(packet, conversation, context) + + c.domain_handle = s.OpenDomain(h, + 
security.SEC_FLAG_MAXIMUM_ALLOWED, + c.domain_sid) + return True + + +SAMR_QUERY_DOMAIN_INFO_LEVELS = [8, 12] + + +def packet_samr_8(packet, conversation, context): + # QueryDomainInfo [228] + c = context.get_samr_context() + s = c.get_connection() + if c.domain_handle is None: + packet_samr_7(packet, conversation, context) + level = random.choice(SAMR_QUERY_DOMAIN_INFO_LEVELS) + s.QueryDomainInfo(c.domain_handle, level) + return True + + +packet_samr_14 = null_packet +# CreateDomainAlias +# Ignore these for now. + + +def packet_samr_15(packet, conversation, context): + # EnumDomainAliases + c = context.get_samr_context() + s = c.get_connection() + if c.domain_handle is None: + packet_samr_7(packet, conversation, context) + + s.EnumDomainAliases(c.domain_handle, 100, 0) + return True + + +def packet_samr_16(packet, conversation, context): + # GetAliasMembership + c = context.get_samr_context() + s = c.get_connection() + if c.domain_handle is None: + packet_samr_7(packet, conversation, context) + + sids = lsa.SidArray() + sid = lsa.SidPtr() + sid.sid = c.domain_sid + sids.sids = [sid] + s.GetAliasMembership(c.domain_handle, sids) + return True + + +def packet_samr_17(packet, conversation, context): + # LookupNames + c = context.get_samr_context() + s = c.get_connection() + if c.domain_handle is None: + packet_samr_7(packet, conversation, context) + + name = lsa.String(context.username) + c.rids = s.LookupNames(c.domain_handle, [name]) + return True + + +def packet_samr_18(packet, conversation, context): + # LookupRids + c = context.get_samr_context() + s = c.get_connection() + if c.rids is None: + packet_samr_17(packet, conversation, context) + rids = [] + for r in c.rids: + for i in r.ids: + rids.append(i) + s.LookupRids(c.domain_handle, rids) + return True + + +def packet_samr_19(packet, conversation, context): + # OpenGroup + c = context.get_samr_context() + s = c.get_connection() + if c.domain_handle is None: + packet_samr_7(packet, conversation, context) + + rid = 0x202 # 514, the well-known Domain Guests RID (Domain Users would be 0x201)
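+ # SEC_FLAG_MAXIMUM_ALLOWED asks the server for whatever access it will + # grant, so the generator does not depend on the opened group's ACL.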
+ c.group_handle = s.OpenGroup(c.domain_handle, + security.SEC_FLAG_MAXIMUM_ALLOWED, + rid) + return True + + +def packet_samr_25(packet, conversation, context): + # QueryGroupMember + c = context.get_samr_context() + s = c.get_connection() + if c.group_handle is None: + packet_samr_19(packet, conversation, context) + s.QueryGroupMember(c.group_handle) + return True + + +def packet_samr_34(packet, conversation, context): + # OpenUser + c = context.get_samr_context() + s = c.get_connection() + if c.rids is None: + packet_samr_17(packet, conversation, context) + c.user_handle = s.OpenUser(c.domain_handle, + security.SEC_FLAG_MAXIMUM_ALLOWED, + c.rids[0].ids[0]) + return True + + +def packet_samr_36(packet, conversation, context): + # QueryUserInfo + c = context.get_samr_context() + s = c.get_connection() + if c.user_handle is None: + packet_samr_34(packet, conversation, context) + level = 1 + s.QueryUserInfo(c.user_handle, level) + return True + + +packet_samr_37 = null_packet + + +def packet_samr_39(packet, conversation, context): + # GetGroupsForUser + c = context.get_samr_context() + s = c.get_connection() + if c.user_handle is None: + packet_samr_34(packet, conversation, context) + s.GetGroupsForUser(c.user_handle) + return True + + +packet_samr_40 = null_packet + +packet_samr_44 = null_packet + + +def packet_samr_57(packet, conversation, context): + # Connect2 + c = context.get_samr_context() + c.get_handle() + return True + + +def packet_samr_64(packet, conversation, context): + # Connect5 + c = context.get_samr_context() + c.get_handle() + return True + + +packet_samr_68 = null_packet + + +def packet_srvsvc_16(packet, conversation, context): + # NetShareGetInfo + s = context.get_srvsvc_connection() + server_unc = "\\\\" + context.server + share_name = "IPC$" + level = 1 + s.NetShareGetInfo(server_unc, share_name, level) + return True + + +def packet_srvsvc_21(packet, conversation, context): + """NetSrvGetInfo + + FIXME: Level changed from 102 to 101 here, to bypass Windows error. + + Level 102 will cause WERR_ACCESS_DENIED error against Windows, because: + + > If the level is 102 or 502, the Windows implementation checks whether + > the caller is a member of one of the groups previously mentioned or + > is a member of the Power Users local group. + + It passed against Samba since this check is not implemented by Samba yet. + + refer to: + + https://msdn.microsoft.com/en-us/library/cc247297.aspx#Appendix_A_80 + + """ + srvsvc = context.get_srvsvc_connection() + server_unc = "\\\\" + context.server + level = 101 + srvsvc.NetSrvGetInfo(server_unc, level) + return True diff --git a/python/samba/forest_update.py b/python/samba/forest_update.py new file mode 100644 index 0000000..46de213 --- /dev/null +++ b/python/samba/forest_update.py @@ -0,0 +1,543 @@ +# Samba4 Forest update checker +# +# Copyright (C) Andrew Bartlett 2017 +# +# This program is free software; you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation; either version 3 of the License, or +# (at your option) any later version. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with this program. If not, see . 
+# + +import ldb +import samba +from samba import sd_utils +from samba.dcerpc import security +from samba.provision.common import setup_path +from samba.dsdb import ( + DS_DOMAIN_FUNCTION_2008, + DS_DOMAIN_FUNCTION_2008_R2, + DS_DOMAIN_FUNCTION_2012, + DS_DOMAIN_FUNCTION_2012_R2, + DS_DOMAIN_FUNCTION_2016, +) + +MIN_UPDATE = 11 +MAX_UPDATE = 142 + +update_map = { + # Missing updates from 2008 + 11: "27a03717-5963-48fc-ba6f-69faa33e70ed", + 12: "3467dae5-dedd-4648-9066-f48ac186b20a", + 13: "33b7ee33-1386-47cf-baa1-b03e06473253", + 14: "e9ee8d55-c2fb-4723-a333-c80ff4dfbf45", + 15: "ccfae63a-7fb5-454c-83ab-0e8e1214974e", + 16: "ad3c7909-b154-4c16-8bf7-2c3a7870bb3d", + 17: "26ad2ebf-f8f5-44a4-b97c-a616c8b9d09a", + 18: "4444c516-f43a-4c12-9c4b-b5c064941d61", + 19: "436a1a4b-f41a-46e6-ac86-427720ef29f3", + 20: "b2b7fb45-f50d-41bc-a73b-8f580f3b636a", + 21: "1bdf6366-c3db-4d0b-b8cb-f99ba9bce20f", + 22: "63c0f51a-067c-4640-8a4f-044fb33f1049", + 23: "dae441c0-366e-482e-98d9-60a99a1898cc", + 24: "7dd09ca6-f0d6-43bf-b7f8-ef348f435617", + 25: "6b800a81-affe-4a15-8e41-6ea0c7aa89e4", + 26: "dd07182c-3174-4c95-902a-d64fee285bbf", + 27: "ffa5ee3c-1405-476d-b344-7ad37d69cc25", + 28: "099f1587-af70-49c6-ab6c-7b3e82be0fe2", + 29: "1a3f6b15-55f2-4752-ba27-3d38a8232c4d", + 30: "dee21a17-4e8e-4f40-a58c-c0c009b685a7", + 31: "9bd98bb4-4047-4de5-bf4c-7bd1d0f6d21d", + 32: "3fe80fbf-bf39-4773-b5bd-3e5767a30d2d", + 33: "f02915e2-9141-4f73-b8e7-2804662782da", + 34: "39902c52-ef24-4b4b-8033-2c9dfdd173a2", + 35: "20bf09b4-6d0b-4cd1-9c09-4231edf1209b", + 36: "94f238bb-831c-11d6-977b-00c04f613221", + 37: "94f238bc-831c-11d6-977b-00c04f613221", + 38: "94f238bd-831c-11d6-977b-00c04f613221", + 39: "94f238be-831c-11d6-977b-00c04f613221", + 40: "94f238bf-831c-11d6-977b-00c04f613221", + 41: "94f238c0-831c-11d6-977b-00c04f613221", + 42: "eda27b47-e610-11d6-9793-00c04f613221", + 43: "eda27b48-e610-11d6-9793-00c04f613221", + 44: "eda27b49-e610-11d6-9793-00c04f613221", + 45: "eda27b4a-e610-11d6-9793-00c04f613221", + 46: "26d9c510-e61a-11d6-9793-00c04f613221", + 47: "26d9c511-e61a-11d6-9793-00c04f613221", + 48: "ea08c04c-f474-4212-b19e-5e754f9210d4", + 49: "4c0672a2-437c-4944-b953-5db8f111d665", + 50: "4c022fd1-adab-4d84-a7f1-9580f03da856", + 51: "c03b1f37-c240-4910-93c8-1544a452b4b5", + 52: "560cf82d-9572-48a3-9024-6f2b56f1f866", + 53: "abd97102-88dd-4013-a009-0e2c2f967ff6", + 54: "134428a8-0043-48a6-bcda-63310d9ec4dd", + 55: "d668ad1f-cedd-4565-ab02-9385926ce4f5", + 56: "8f86b825-c322-4101-adc4-579f12d445db", + 57: "9fea28ff-387f-4d57-866d-3893c50f373f", + 58: "782370ce-3d38-438d-8b0c-464220a3039d", + 59: "002fb291-0d00-4b0c-8c00-fe7f50ce6f8d", + 60: "dcb3c95d-deb7-4c51-ad13-43a7d5d06fc7", + 61: "ef010a1e-bd88-48c8-a7af-2affd250d77d", + 62: "bd3413c0-9559-469b-9f3d-51d7faabd81a", + 63: "f814097b-3e3d-49ba-8a3a-092c25085f06", + 64: "6eb8eaf9-3403-4ba5-8b4b-ce349a4680ad", + 65: "07e57d28-ad40-44fc-8334-8a0dc119b3f4", + 66: "6fd48655-1698-497a-ac8d-8267ce01c80b", + 67: "10338d31-2423-4dff-b4b5-ef025144b01f", + 68: "a96e2ed5-7a7c-4d5c-9d5d-965eca0051da", + 69: "613bd063-e8e9-4a62-8f4c-cda566f7eb6f", + 70: "2a858903-5696-4364-b4e5-4cac027ca7a6", + 71: "0fc5a978-0059-4b0a-9dc2-9896e8e389a1", + 72: "4d753a29-26ac-4d1a-bc80-311f947e4f0a", + 73: "3b3adbdb-4485-4559-aed8-9811c4bf90e4", + 74: "56040c71-fe93-4037-8fe9-1a4d1a283009", + 75: "caa2bfad-0cca-483b-8d00-347f943292a8", + 76: "2b9e0609-6d75-498a-9727-c9fcc93f0e42", + 77: "96541a16-910a-4b66-acde-720a0dff03c7", + 78: "429a6334-1a00-4515-bf48-676deb55954a", + # Windows Server 2008 R2 - 
version 5 + 79: "21ae657c-6649-43c4-bbb3-7f184fdf58c1", + 80: "dca8f425-baae-47cd-b424-e3f6c76ed08b", + 81: "a662b036-dbbe-4166-b4ba-21abea17f9cc", + 82: "9d17b863-18c3-497d-9bde-45ddb95fcb65", + 83: "11c39bed-4bee-45f5-b195-8da0e05b573a", + # Windows Server 2012 - version 11 + 84: "4664e973-cb20-4def-b3d5-559d6fe123e0", + 85: "2972d92d-a07a-44ac-9cb0-bf243356f345", + 86: "09a49cb3-6c54-4b83-ab20-8370838ba149", + 87: "77283e65-ce02-4dc3-8c1e-bf99b22527c2", + 88: "0afb7f53-96bd-404b-a659-89e65c269420", + 89: "c7f717ef-fdbe-4b4b-8dfc-fa8b839fbcfa", + 90: "00232167-f3a4-43c6-b503-9acb7a81b01c", + 91: "73a9515b-511c-44d2-822b-444a33d3bd33", + 92: "e0c60003-2ed7-4fd3-8659-7655a7e79397", + 93: "ed0c8cca-80ab-4b6b-ac5a-59b1d317e11f", + 94: "b6a6c19a-afc9-476b-8994-61f5b14b3f05", + 95: "defc28cd-6cb6-4479-8bcb-aabfb41e9713", + 96: "d6bd96d4-e66b-4a38-9c6b-e976ff58c56d", + 97: "bb8efc40-3090-4fa2-8a3f-7cd1d380e695", + 98: "2d6abe1b-4326-489e-920c-76d5337d2dc5", + 99: "6b13dfb5-cecc-4fb8-b28d-0505cea24175", + 100: "92e73422-c68b-46c9-b0d5-b55f9c741410", + 101: "c0ad80b4-8e84-4cc4-9163-2f84649bcc42", + 102: "992fe1d0-6591-4f24-a163-c820fcb7f308", + 103: "ede85f96-7061-47bf-b11b-0c0d999595b5", + 104: "ee0f3271-eb51-414a-bdac-8f9ba6397a39", + 105: "587d52e0-507e-440e-9d67-e6129f33bb68", + 106: "ce24f0f6-237e-43d6-ac04-1e918ab04aac", + 107: "7f77d431-dd6a-434f-ae4d-ce82928e498f", + 108: "ba14e1f6-7cd1-4739-804f-57d0ea74edf4", + 109: "156ffa2a-e07c-46fb-a5c4-fbd84a4e5cce", + 110: "7771d7dd-2231-4470-aa74-84a6f56fc3b6", + 111: "49b2ae86-839a-4ea0-81fe-9171c1b98e83", + 112: "1b1de989-57ec-4e96-b933-8279a8119da4", + 113: "281c63f0-2c9a-4cce-9256-a238c23c0db9", + 114: "4c47881a-f15a-4f6c-9f49-2742f7a11f4b", + 115: "2aea2dc6-d1d3-4f0c-9994-66c1da21de0f", + 116: "ae78240c-43b9-499e-ae65-2b6e0f0e202a", + 117: "261b5bba-3438-4d5c-a3e9-7b871e5f57f0", + 118: "3fb79c05-8ea1-438c-8c7a-81f213aa61c2", + 119: "0b2be39a-d463-4c23-8290-32186759d3b1", + 120: "f0842b44-bc03-46a1-a860-006e8527fccd", + 121: "93efec15-4dd9-4850-bc86-a1f2c8e2ebb9", + 122: "9e108d96-672f-40f0-b6bd-69ee1f0b7ac4", + 123: "1e269508-f862-4c4a-b01f-420d26c4ff8c", + 125: "e1ab17ed-5efb-4691-ad2d-0424592c5755", + 126: "0e848bd4-7c70-48f2-b8fc-00fbaa82e360", + 127: "016f23f7-077d-41fa-a356-de7cfdb01797", + 128: "49c140db-2de3-44c2-a99a-bab2e6d2ba81", + 129: "e0b11c80-62c5-47f7-ad0d-3734a71b8312", + 130: "2ada1a2d-b02f-4731-b4fe-59f955e24f71", + # Windows Server 2012 R2 - version 15 + 131: "b83818c1-01a6-4f39-91b7-a3bb581c3ae3", + 132: "bbbb9db0-4009-4368-8c40-6674e980d3c3", + 133: "f754861c-3692-4a7b-b2c2-d0fa28ed0b0b", + 134: "d32f499f-3026-4af0-a5bd-13fe5a331bd2", + 135: "38618886-98ee-4e42-8cf1-d9a2cd9edf8b", + # Windows Server 2016 - version 16 + 136: "328092fb-16e7-4453-9ab8-7592db56e9c4", + 137: "3a1c887f-df0a-489f-b3f2-2d0409095f6e", + 138: "232e831f-f988-4444-8e3e-8a352e2fd411", + 139: "ddddcf0c-bec9-4a5a-ae86-3cfe6cc6e110", + 140: "a0a45aac-5550-42df-bb6a-3cc5c46b52f2", + 141: "3e7645f3-3ea5-4567-b35a-87630449c70c", + 142: "e634067b-e2c4-4d79-b6e8-73c619324d5e", +} + +functional_level_to_max_update = { + DS_DOMAIN_FUNCTION_2008: 78, + DS_DOMAIN_FUNCTION_2008_R2: 83, + DS_DOMAIN_FUNCTION_2012: 130, + DS_DOMAIN_FUNCTION_2012_R2: 135, + DS_DOMAIN_FUNCTION_2016: 142, +} + +functional_level_to_version = { + DS_DOMAIN_FUNCTION_2008: 2, + DS_DOMAIN_FUNCTION_2008_R2: 5, + DS_DOMAIN_FUNCTION_2012: 11, + DS_DOMAIN_FUNCTION_2012_R2: 15, + DS_DOMAIN_FUNCTION_2016: 16, +} + +# Documentation says that this update was deprecated +missing_updates = [124] + + 
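The tables above drive everything that follows: functional_level_to_max_update bounds the update range that check_updates_functional_level walks, functional_level_to_version is the revision stamp written once those updates are in place, and missing_updates lists the one operation (124) that the documentation marks as deprecated. As a quick illustration (an editor's sketch, not part of the patch), raising a forest from 2008 R2 to 2012 covers exactly updates 84 through 130, minus 124:

    from samba.dsdb import DS_DOMAIN_FUNCTION_2008_R2, DS_DOMAIN_FUNCTION_2012

    # min_update is one past the old level's maximum; the new level's
    # maximum is inclusive (this mirrors check_updates_functional_level below)
    old_max = functional_level_to_max_update[DS_DOMAIN_FUNCTION_2008_R2]  # 83
    new_max = functional_level_to_max_update[DS_DOMAIN_FUNCTION_2012]     # 130
    to_apply = [op for op in range(old_max + 1, new_max + 1)
                if op not in missing_updates]
    assert to_apply[0] == 84 and to_apply[-1] == 130 and 124 not in to_apply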
+class ForestUpdateException(Exception): + pass + + +class ForestUpdate(object): + """Check and update a SAM database for forest updates""" + + def __init__(self, samdb, verbose=False, fix=False, + add_update_container=True): + """ + :param samdb: LDB database + :param verbose: Show the ldif changes + :param fix: Apply the update if the container is missing + :param add_update_container: Add the container at the end of the change + :raise ForestUpdateException: + """ + from samba.ms_forest_updates_markdown import read_ms_markdown + + self.samdb = samdb + self.fix = fix + self.verbose = verbose + self.add_update_container = add_update_container + # TODO In future we should check for inconsistencies when it claims it has been done + self.check_update_applied = False + + self.config_dn = self.samdb.get_config_basedn() + self.domain_dn = self.samdb.domain_dn() + self.schema_dn = self.samdb.get_schema_basedn() + + self.sd_utils = sd_utils.SDUtils(samdb) + self.domain_sid = security.dom_sid(samdb.get_domain_sid()) + + self.forestupdate_container = self.samdb.get_config_basedn() + try: + self.forestupdate_container.add_child("CN=Operations,CN=ForestUpdates") + except ldb.LdbError: + raise ForestUpdateException("Failed to add forest update container child") + + self.revision_object = self.samdb.get_config_basedn() + try: + self.revision_object.add_child("CN=ActiveDirectoryUpdate,CN=ForestUpdates") + except ldb.LdbError: + raise ForestUpdateException("Failed to add revision object child") + + # Store the result of parsing the markdown in a dictionary + self.stored_ldif = {} + read_ms_markdown(setup_path("adprep/WindowsServerDocs/Forest-Wide-Updates.md"), + out_dict=self.stored_ldif) + + def check_updates_functional_level(self, functional_level, + old_functional_level=None, + update_revision=False): + """ + Apply all updates for a given old and new functional level + :param functional_level: constant + :param old_functional_level: constant + :param update_revision: modify the stored version + :raise ForestUpdateException: + """ + res = self.samdb.search(base=self.revision_object, + attrs=["revision"], scope=ldb.SCOPE_BASE) + + expected_update = functional_level_to_max_update[functional_level] + + if old_functional_level: + min_update = functional_level_to_max_update[old_functional_level] + min_update += 1 + else: + min_update = MIN_UPDATE + + self.check_updates_range(min_update, expected_update) + + expected_version = functional_level_to_version[functional_level] + found_version = int(res[0]['revision'][0]) + if update_revision and found_version < expected_version: + if not self.fix: + raise ForestUpdateException("Revision is not high enough. Fix is set to False." 
+ "\nExpected: %dGot: %d" % (expected_version, + found_version)) + self.samdb.modify_ldif("""dn: %s +changetype: modify +replace: revision +revision: %d + """ % (str(self.revision_object), expected_version)) + + def check_updates_iterator(self, iterator): + """ + Apply a list of updates which must be within the valid range of updates + :param iterator: Iterable specifying integer update numbers to apply + :raise ForestUpdateException: + """ + for op in iterator: + if op < MIN_UPDATE or op > MAX_UPDATE: + raise ForestUpdateException("Update number invalid.") + + if 84 <= op <= 87: + self.operation_ldif(op) + elif 91 <= op <= 126: + self.operation_ldif(op) + elif 131 <= op <= 134: + self.operation_ldif(op) + elif 136 <= op <= 142: + self.operation_ldif(op) + else: + # No LDIF file exists for the change + getattr(self, "operation_%d" % op)(op) + + def check_updates_range(self, start=0, end=0): + """ + Apply a range of updates which must be within the valid range of updates + :param start: integer update to begin + :param end: integer update to end (inclusive) + :raise ForestUpdateException: + """ + op = start + if start < MIN_UPDATE or start > end or end > MAX_UPDATE: + raise ForestUpdateException("Update number invalid.") + while op <= end: + if op in missing_updates: + pass + elif 84 <= op <= 87: + self.operation_ldif(op) + elif 91 <= op <= 126: + self.operation_ldif(op) + elif 131 <= op <= 134: + self.operation_ldif(op) + elif 136 <= op <= 142: + self.operation_ldif(op) + else: + # No LDIF file exists for the change + getattr(self, "operation_%d" % op)(op) + + op += 1 + + def update_exists(self, op): + """ + :param op: Integer update number + :return: True if update exists else False + """ + update_dn = "CN=%s,%s" % (update_map[op], self.forestupdate_container) + try: + res = self.samdb.search(base=update_dn, + scope=ldb.SCOPE_BASE, + attrs=[]) + except ldb.LdbError as e: + (num, msg) = e.args + if num != ldb.ERR_NO_SUCH_OBJECT: + raise + return False + + assert len(res) == 1 + print("Skip Forest Update %u: %s" % (op, update_map[op])) + return True + + def update_add(self, op): + """ + Add the corresponding container object for the given update + :param op: Integer update + """ + self.samdb.add_ldif("""dn: CN=%s,%s +objectClass: container +""" % (update_map[op], str(self.forestupdate_container))) + print("Applied Forest Update %u: %s" % (op, update_map[op])) + + def operation_ldif(self, op): + if self.update_exists(op): + # Assume we have applied it (we have no double checks for these) + return True + + guid = update_map[op] + if guid in self.stored_ldif: + ldif = self.stored_ldif[guid] + elif guid.lower() in self.stored_ldif: + ldif = self.stored_ldif[guid.lower()] + elif guid.upper() in self.stored_ldif: + ldif = self.stored_ldif[guid.upper()] + else: + raise ForestUpdateException("OPERATION %d: ldif for %s not found" % + (op, guid)) + + sub_ldif = samba.substitute_var(ldif, {"CONFIG_DN": + str(self.config_dn), + "FOREST_ROOT_DOMAIN": + str(self.domain_dn), + "SCHEMA_DN": + str(self.schema_dn)}) + if self.verbose: + print("UPDATE (LDIF) ------ OPERATION %d" % op) + print(sub_ldif) + + try: + self.samdb.modify_ldif(sub_ldif) + except ldb.LdbError as e: + (num, msg) = e.args + if num != ldb.ERR_ATTRIBUTE_OR_VALUE_EXISTS: + raise e + pass + + if self.add_update_container: + self.update_add(op) + + def raise_if_not_fix(self, op): + """ + Raises an exception if not set to fix. 
+ :param op: Integer operation + :raise ForestUpdateException: + """ + if not self.fix: + raise ForestUpdateException("Missing operation %d. Fix is currently set to False" % op) + + # + # Created a new object CN=Sam-Domain in the Schema partition + # + # Created the following access control entry (ACE) to grant Write Property + # to Principal Self on the object: ... + # + def operation_88(self, op): + if self.update_exists(op): + return + self.raise_if_not_fix(op) + + ace = "(OA;CIIO;WP;ea1b7b93-5e48-46d5-bc6c-4df4fda78a35;bf967a86-0de6-11d0-a285-00aa003049e2;PS)" + + schema_dn = ldb.Dn(self.samdb, "CN=Sam-Domain,%s" % str(self.schema_dn)) + + self.sd_utils.update_aces_in_dacl(schema_dn, + sddl_attr="defaultSecurityDescriptor", + add_aces=[ace]) + + if self.add_update_container: + self.update_add(op) + + # + # Created a new object CN=Domain-DNS in the Schema partition + # + # Created the following access control entry (ACE) to grant Write Property + # to Principal Self on the object: ... + # + def operation_89(self, op): + if self.update_exists(op): + return + self.raise_if_not_fix(op) + + ace = "(OA;CIIO;WP;ea1b7b93-5e48-46d5-bc6c-4df4fda78a35;bf967a86-0de6-11d0-a285-00aa003049e2;PS)" + + schema_dn = ldb.Dn(self.samdb, "CN=Domain-DNS,%s" % str(self.schema_dn)) + + self.sd_utils.update_aces_in_dacl(schema_dn, + sddl_attr="defaultSecurityDescriptor", + add_aces=[ace]) + + if self.add_update_container: + self.update_add(op) + + # Update display specifiers + def operation_90(self, op): + if self.add_update_container and not self.update_exists(op): + self.update_add(op) + + # Update display specifiers + def operation_127(self, op): + if self.add_update_container and not self.update_exists(op): + self.update_add(op) + + # Update appears to already be applied in documentation + def operation_128(self, op): + if self.add_update_container and not self.update_exists(op): + self.update_add(op) + + # Grant ACE (OA;CIOI;RPWP;3f78c3e5-f79a-46bd-a0b8-9d18116ddc79;;PS) to samDomain + def operation_129(self, op): + if self.update_exists(op): + return + self.raise_if_not_fix(op) + + ace = "(OA;CIOI;RPWP;3f78c3e5-f79a-46bd-a0b8-9d18116ddc79;;PS)" + + schema_dn = ldb.Dn(self.samdb, "CN=Sam-Domain,%s" % str(self.schema_dn)) + + self.sd_utils.update_aces_in_dacl(schema_dn, + sddl_attr="defaultSecurityDescriptor", + add_aces=[ace]) + + if self.add_update_container: + self.update_add(op) + + # Grant ACE (OA;CIOI;RPWP;3f78c3e5-f79a-46bd-a0b8-9d18116ddc79;;PS) to domainDNS + def operation_130(self, op): + if self.update_exists(op): + return + self.raise_if_not_fix(op) + + ace = "(OA;CIOI;RPWP;3f78c3e5-f79a-46bd-a0b8-9d18116ddc79;;PS)" + + schema_dn = ldb.Dn(self.samdb, "CN=Domain-DNS,%s" % str(self.schema_dn)) + + self.sd_utils.update_aces_in_dacl(schema_dn, + sddl_attr="defaultSecurityDescriptor", + add_aces=[ace]) + + if self.add_update_container: + self.update_add(op) + + # Set msDS-ClaimIsValueSpaceRestricted on ad://ext/AuthenticationSilo to FALSE + def operation_135(self, op): + if self.update_exists(op): + return + self.raise_if_not_fix(op) + + self.samdb.modify_ldif("""dn: CN=ad://ext/AuthenticationSilo,CN=Claim Types,CN=Claims Configuration,CN=Services,%s +changetype: modify +replace: msDS-ClaimIsValueSpaceRestricted +msDS-ClaimIsValueSpaceRestricted: FALSE +""" % self.config_dn, + controls=["relax:0", "provision:0"]) + + if self.add_update_container: + self.update_add(op) + + # + # THE FOLLOWING ARE MISSING UPDATES FROM 2008 + 2008 R2 + # + + def operation_11(self, op): + if self.add_update_container and 
not self.update_exists(op): + self.update_add(op) + + def operation_54(self, op): + if self.add_update_container and not self.update_exists(op): + self.update_add(op) + + def operation_79(self, op): + if self.add_update_container and not self.update_exists(op): + self.update_add(op) + + def operation_80(self, op): + if self.add_update_container and not self.update_exists(op): + self.update_add(op) + + def operation_81(self, op): + if self.add_update_container and not self.update_exists(op): + self.update_add(op) + + def operation_82(self, op): + if self.add_update_container and not self.update_exists(op): + self.update_add(op) + + def operation_83(self, op): + if self.add_update_container and not self.update_exists(op): + self.update_add(op) diff --git a/python/samba/functional_level.py b/python/samba/functional_level.py new file mode 100644 index 0000000..e5ccf39 --- /dev/null +++ b/python/samba/functional_level.py @@ -0,0 +1,83 @@ +# domain management - common code +# +# Copyright Catalyst.Net Ltd 2017-2023 +# Copyright Jelmer Vernooij 2007-2012 +# +# This program is free software; you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation; either version 3 of the License, or +# (at your option) any later version. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with this program. If not, see <http://www.gnu.org/licenses/>. +# + +from samba.dsdb import ( + DS_DOMAIN_FUNCTION_2000, + DS_DOMAIN_FUNCTION_2003, + DS_DOMAIN_FUNCTION_2008, + DS_DOMAIN_FUNCTION_2008_R2, + DS_DOMAIN_FUNCTION_2012, + DS_DOMAIN_FUNCTION_2012_R2, + DS_DOMAIN_FUNCTION_2003_MIXED, + DS_DOMAIN_FUNCTION_2016 +) + +string_version_to_constant = { + "2000": DS_DOMAIN_FUNCTION_2000, + "2003": DS_DOMAIN_FUNCTION_2003, + "2008": DS_DOMAIN_FUNCTION_2008, + "2008_R2": DS_DOMAIN_FUNCTION_2008_R2, + "2012": DS_DOMAIN_FUNCTION_2012, + "2012_R2": DS_DOMAIN_FUNCTION_2012_R2, + "2016": DS_DOMAIN_FUNCTION_2016, +} + + +def string_to_level(string): + """Interpret a string indicating a functional level.""" + return string_version_to_constant[string] + + +def level_to_string(level): + """Turn the level enum number into a printable string.""" + if level < DS_DOMAIN_FUNCTION_2000: + return "invalid" + strings = { + DS_DOMAIN_FUNCTION_2000: "2000", + DS_DOMAIN_FUNCTION_2003_MIXED: + "2003 with mixed domains/interim (NT4 DC support)", + DS_DOMAIN_FUNCTION_2003: "2003", + DS_DOMAIN_FUNCTION_2008: "2008", + DS_DOMAIN_FUNCTION_2008_R2: "2008 R2", + DS_DOMAIN_FUNCTION_2012: "2012", + DS_DOMAIN_FUNCTION_2012_R2: "2012 R2", + DS_DOMAIN_FUNCTION_2016: "2016", + } + return strings.get(level, "higher than 2016") + + +def dc_level_from_lp(lp): + """Return the ad dc functional level as an integer from a LoadParm""" + + # I don't like the RuntimeError here, but these "can't happen" + # except by a developer stuffup.
+ + smb_conf_dc_functional_level = lp.get('ad dc functional level') + if smb_conf_dc_functional_level is None: + # This shouldn't be possible, except if the default option + # value is not in the loadparm enum table + raise RuntimeError("'ad dc functional level' in smb.conf unrecognised!") + + try: + return string_to_level(smb_conf_dc_functional_level) + except KeyError: + # This shouldn't be possible at all, unless the table in + # python/samba/functional_level.py is not a superset of that + # in lib/param/param_table.c + raise RuntimeError(f"'ad dc functional level = {smb_conf_dc_functional_level}'" + " in smb.conf is not valid!") diff --git a/python/samba/getopt.py b/python/samba/getopt.py new file mode 100644 index 0000000..0935ed0 --- /dev/null +++ b/python/samba/getopt.py @@ -0,0 +1,539 @@ +# Samba-specific bits for optparse +# Copyright (C) Jelmer Vernooij 2007 +# +# This program is free software; you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation; either version 3 of the License, or +# (at your option) any later version. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with this program. If not, see . +# + +"""Support for parsing Samba-related command-line options.""" + +__docformat__ = "restructuredText" + +import optparse +import os +import sys +from abc import ABCMeta, abstractmethod +from copy import copy + +from samba.credentials import ( + Credentials, + AUTO_USE_KERBEROS, + DONT_USE_KERBEROS, + MUST_USE_KERBEROS, +) +from samba._glue import get_burnt_commandline + + +def check_bytes(option, opt, value): + """Custom option type to allow the input of sizes using byte, kb, mb ... + + units, e.g. 2Gb, 4KiB ... + e.g. Option("--size", type="bytes", metavar="SIZE") + """ + + multipliers = {"B": 1, + "KB": 1024, + "MB": 1024 * 1024, + "GB": 1024 * 1024 * 1024} + + # strip out any spaces + v = value.replace(" ", "") + + # extract the numeric prefix + digits = "" + while v and v[0:1].isdigit() or v[0:1] == '.': + digits += v[0] + v = v[1:] + + try: + m = float(digits) + except ValueError: + msg = ("{0} option requires a numeric value, " + "with an optional unit suffix").format(opt) + raise optparse.OptionValueError(msg) + + # strip out the 'i' and convert to upper case so + # kib Kib kb KB are all equivalent + suffix = v.upper().replace("I", "") + try: + return m * multipliers[suffix] + except KeyError as k: + msg = ("{0} invalid suffix '{1}', " + "should be B, Kb, Mb or Gb").format(opt, v) + raise optparse.OptionValueError(msg) + + +class OptionMissingError(optparse.OptionValueError): + """One or more Options with required=True is missing.""" + + def __init__(self, options): + """Raised when required Options are missing from the command line. + + :param options: list of 1 or more option + """ + self.options = options + + def __str__(self): + if len(self.options) == 1: + missing = self.options[0] + return f"Argument {missing} is required." + else: + options = sorted([str(option) for option in self.options]) + missing = ", ".join(options) + return f"The arguments {missing} are required." + + +class ValidationError(Exception): + """ValidationError is the exception raised by validators. 
+ + Should be raised from the __call__ method of the Validator subclass. + """ + pass + + +class Validator(metaclass=ABCMeta): + """Base class for Validators used by SambaOption. + + Subclass this to make custom validators and implement __call__. + """ + + @abstractmethod + def __call__(self, field, value): + pass + + +class Option(optparse.Option): + ATTRS = optparse.Option.ATTRS + ["required", "validators"] + TYPES = optparse.Option.TYPES + ("bytes",) + TYPE_CHECKER = copy(optparse.Option.TYPE_CHECKER) + TYPE_CHECKER["bytes"] = check_bytes + + def run_validators(self, opt, value): + """Runs the list of validators on the current option.""" + validators = getattr(self, "validators") or [] + for validator in validators: + validator(opt, value) + + def convert_value(self, opt, value): + """Override convert_value to run validators just after. + + This can also be done in process() but there we would have to + replace the entire method. + """ + value = super().convert_value(opt, value) + self.run_validators(opt, value) + return value + + +class OptionParser(optparse.OptionParser): + """Samba OptionParser, adding support for required=True on Options.""" + + def __init__(self, + usage=None, + option_list=None, + option_class=Option, + version=None, + conflict_handler="error", + description=None, + formatter=None, + add_help_option=True, + prog=None, + epilog=None): + """ + Ensure that option_class defaults to the Samba one. + """ + super().__init__(usage, option_list, option_class, version, + conflict_handler, description, formatter, + add_help_option, prog, epilog) + + def check_values(self, values, args): + """Loop through required options if value is missing raise exception.""" + missing = [] + for option in self._get_all_options(): + if option.required: + value = getattr(values, option.dest) + if value is None: + missing.append(option) + + if missing: + raise OptionMissingError(missing) + + return super().check_values(values, args) + + +class OptionGroup(optparse.OptionGroup): + """Samba OptionGroup base class. + + Provides a generic set_option method to be used as Option callback, + so that one doesn't need to be created for every available Option. + + Also overrides the add_option method, so it correctly initialises + the defaults on the OptionGroup. + """ + + def add_option(self, *args, **kwargs): + """Override add_option so it applies defaults during constructor.""" + opt = super().add_option(*args, **kwargs) + default = None if opt.default == optparse.NO_DEFAULT else opt.default + self.set_option(opt, opt.get_opt_string(), default, self.parser) + return opt + + def set_option(self, option, opt_str, arg, parser): + """Callback to set the attribute based on the Option dest name.""" + dest = option.dest or option._long_opts[0][2:].replace("-", "_") + setattr(self, dest, arg) + + +class SambaOptions(OptionGroup): + """General Samba-related command line options.""" + + def __init__(self, parser): + from samba import fault_setup + fault_setup() + + # This removes passwords from the commandline via + # setproctitle() but makes no change to python sys.argv so we + # can continue to process as normal + # + # get_burnt_commandline returns None if no change is needed + new_proctitle = get_burnt_commandline(sys.argv) + if new_proctitle is not None: + try: + import setproctitle + setproctitle.setproctitle(new_proctitle) + + except ModuleNotFoundError: + msg = ("WARNING: Using passwords on command line is insecure. 
" + "Installing the setproctitle python module will hide " + "these from shortly after program start.\n") + sys.stderr.write(msg) + sys.stderr.flush() + + from samba.param import LoadParm + super().__init__(parser, "Samba Common Options") + self.add_option("-s", "--configfile", action="callback", + type=str, metavar="FILE", help="Configuration file", + callback=self._load_configfile) + self.add_option("-d", "--debuglevel", action="callback", + type=str, metavar="DEBUGLEVEL", help="debug level", + callback=self._set_debuglevel) + self.add_option("--option", action="callback", + type=str, metavar="OPTION", + help="set smb.conf option from command line", + callback=self._set_option) + self.add_option("--realm", action="callback", + type=str, metavar="REALM", help="set the realm name", + callback=self._set_realm) + self._configfile = None + self._lp = LoadParm() + self.realm = None + + def get_loadparm_path(self): + """Return path to the smb.conf file specified on the command line.""" + return self._configfile + + def _load_configfile(self, option, opt_str, arg, parser): + self._configfile = arg + + def _set_debuglevel(self, option, opt_str, arg, parser): + try: + self._lp.set('debug level', arg) + except RuntimeError: + raise optparse.OptionValueError( + f"invalid -d/--debug value: '{arg}'") + parser.values.debuglevel = arg + + def _set_realm(self, option, opt_str, arg, parser): + try: + self._lp.set('realm', arg) + except RuntimeError: + raise optparse.OptionValueError( + f"invalid --realm value: '{arg}'") + self.realm = arg + + def _set_option(self, option, opt_str, arg, parser): + if arg.find('=') == -1: + raise optparse.OptionValueError( + "--option option takes a 'a=b' argument") + a = arg.split('=', 1) + try: + self._lp.set(a[0], a[1]) + except Exception as e: + raise optparse.OptionValueError( + "invalid --option option value %r: %s" % (arg, e)) + + def get_loadparm(self): + """Return loadparm object with data specified on the command line.""" + if self._configfile is not None: + self._lp.load(self._configfile) + elif os.getenv("SMB_CONF_PATH") is not None: + self._lp.load(os.getenv("SMB_CONF_PATH")) + else: + self._lp.load_default() + return self._lp + + +class Samba3Options(SambaOptions): + """General Samba-related command line options with an s3 param.""" + + def __init__(self, parser): + super().__init__(parser) + from samba.samba3 import param as s3param + self._lp = s3param.get_context() + + +class HostOptions(OptionGroup): + """Command line options for connecting to target host or database.""" + + def __init__(self, parser): + super().__init__(parser, "Host Options") + + self.add_option("-H", "--URL", + help="LDB URL for database or target server", + type=str, metavar="URL", action="callback", + callback=self.set_option, dest="H") + + +class VersionOptions(OptionGroup): + """Command line option for printing Samba version.""" + def __init__(self, parser): + super().__init__(parser, "Version Options") + self.add_option("-V", "--version", action="callback", + callback=self._display_version, + help="Display version number") + + def _display_version(self, option, opt_str, arg, parser): + import samba + print(samba.version) + sys.exit(0) + + +def parse_kerberos_arg_legacy(arg, opt_str): + if arg.lower() in ["yes", 'true', '1']: + return MUST_USE_KERBEROS + elif arg.lower() in ["no", 'false', '0']: + return DONT_USE_KERBEROS + elif arg.lower() in ["auto"]: + return AUTO_USE_KERBEROS + else: + raise optparse.OptionValueError("invalid %s option value: %s" % + (opt_str, arg)) + + +def 
parse_kerberos_arg(arg, opt_str): + if arg.lower() == 'required': + return MUST_USE_KERBEROS + elif arg.lower() == 'desired': + return AUTO_USE_KERBEROS + elif arg.lower() == 'off': + return DONT_USE_KERBEROS + else: + raise optparse.OptionValueError("invalid %s option value: %s" % + (opt_str, arg)) + + +class CredentialsOptions(OptionGroup): + """Command line options for specifying credentials.""" + + def __init__(self, parser, special_name=None): + self.special_name = special_name + if special_name is not None: + self.section = "Credentials Options (%s)" % special_name + else: + self.section = "Credentials Options" + + self.ask_for_password = True + self.ipaddress = None + self.machine_pass = False + super().__init__(parser, self.section) + self._add_option("--simple-bind-dn", metavar="DN", action="callback", + callback=self._set_simple_bind_dn, type=str, + help="DN to use for a simple bind") + self._add_option("--password", metavar="PASSWORD", action="callback", + help="Password", type=str, callback=self._set_password) + self._add_option("-U", "--username", metavar="USERNAME", + action="callback", type=str, + help="Username", callback=self._parse_username) + self._add_option("-W", "--workgroup", metavar="WORKGROUP", + action="callback", type=str, + help="Workgroup", callback=self._parse_workgroup) + self._add_option("-N", "--no-pass", action="callback", + help="Don't ask for a password", + callback=self._set_no_password) + self._add_option("", "--ipaddress", metavar="IPADDRESS", + action="callback", type=str, + help="IP address of server", + callback=self._set_ipaddress) + self._add_option("-P", "--machine-pass", + action="callback", + help="Use stored machine account password", + callback=self._set_machine_pass) + self._add_option("--use-kerberos", metavar="desired|required|off", + action="callback", type=str, + help="Use Kerberos authentication", callback=self._set_kerberos) + self._add_option("--use-krb5-ccache", metavar="KRB5CCNAME", + action="callback", type=str, + help="Kerberos Credentials cache", + callback=self._set_krb5_ccache) + self._add_option("-A", "--authentication-file", metavar="AUTHFILE", + action="callback", type=str, + help="Authentication file", + callback=self._set_auth_file) + + # LEGACY + self._add_option("-k", "--kerberos", metavar="KERBEROS", + action="callback", type=str, + help="DEPRECATED: Migrate to --use-kerberos", callback=self._set_kerberos_legacy) + self.creds = Credentials() + + def _add_option(self, *args1, **kwargs): + if self.special_name is None: + return self.add_option(*args1, **kwargs) + + args2 = () + for a in args1: + if not a.startswith("--"): + continue + args2 += (a.replace("--", "--%s-" % self.special_name),) + self.add_option(*args2, **kwargs) + + def _parse_username(self, option, opt_str, arg, parser): + self.creds.parse_string(arg) + self.machine_pass = False + + def _parse_workgroup(self, option, opt_str, arg, parser): + self.creds.set_domain(arg) + + def _set_password(self, option, opt_str, arg, parser): + self.creds.set_password(arg) + self.ask_for_password = False + self.machine_pass = False + + def _set_no_password(self, option, opt_str, arg, parser): + self.ask_for_password = False + + def _set_machine_pass(self, option, opt_str, arg, parser): + self.machine_pass = True + + def _set_ipaddress(self, option, opt_str, arg, parser): + self.ipaddress = arg + + def _set_kerberos_legacy(self, option, opt_str, arg, parser): + print('WARNING: The option -k|--kerberos is deprecated!') + 
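+ # parse_kerberos_arg_legacy() maps the old yes/no/auto (and 1/0) + # spellings onto the same Kerberos states as desired|required|off.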
self.creds.set_kerberos_state(parse_kerberos_arg_legacy(arg, opt_str)) + + def _set_kerberos(self, option, opt_str, arg, parser): + self.creds.set_kerberos_state(parse_kerberos_arg(arg, opt_str)) + + def _set_simple_bind_dn(self, option, opt_str, arg, parser): + self.creds.set_bind_dn(arg) + + def _set_krb5_ccache(self, option, opt_str, arg, parser): + self.creds.set_kerberos_state(MUST_USE_KERBEROS) + self.creds.set_named_ccache(arg) + + def _set_auth_file(self, option, opt_str, arg, parser): + if os.path.exists(arg): + self.creds.parse_file(arg) + self.ask_for_password = False + self.machine_pass = False + + def get_credentials(self, lp, fallback_machine=False): + """Obtain the credentials set on the command-line. + + :param lp: Loadparm object to use. + :return: Credentials object + """ + self.creds.guess(lp) + if self.machine_pass: + self.creds.set_machine_account(lp) + elif self.ask_for_password: + self.creds.set_cmdline_callbacks() + + # possibly fallback to using the machine account, if we have + # access to the secrets db + if fallback_machine and not self.creds.authentication_requested(): + try: + self.creds.set_machine_account(lp) + except Exception: + pass + + return self.creds + + +class CredentialsOptionsDouble(CredentialsOptions): + """Command line options for specifying credentials of two servers.""" + + def __init__(self, parser): + super().__init__(parser) + self.no_pass2 = True + self.add_option("--simple-bind-dn2", metavar="DN2", action="callback", + callback=self._set_simple_bind_dn2, type=str, + help="DN to use for a simple bind") + self.add_option("--password2", metavar="PASSWORD2", action="callback", + help="Password", type=str, + callback=self._set_password2) + self.add_option("--username2", metavar="USERNAME2", + action="callback", type=str, + help="Username for second server", + callback=self._parse_username2) + self.add_option("--workgroup2", metavar="WORKGROUP2", + action="callback", type=str, + help="Workgroup for second server", + callback=self._parse_workgroup2) + self.add_option("--no-pass2", action="store_true", + help="Don't ask for a password for the second server") + self.add_option("--use-kerberos2", metavar="desired|required|off", + action="callback", type=str, + help="Use Kerberos authentication", callback=self._set_kerberos2) + + # LEGACY + self.add_option("--kerberos2", metavar="KERBEROS2", + action="callback", type=str, + help="DEPRECATED: Migrate to --use-kerberos2", callback=self._set_kerberos2_legacy) + self.creds2 = Credentials() + + def _parse_username2(self, option, opt_str, arg, parser): + self.creds2.parse_string(arg) + + def _parse_workgroup2(self, option, opt_str, arg, parser): + self.creds2.set_domain(arg) + + def _set_password2(self, option, opt_str, arg, parser): + self.creds2.set_password(arg) + self.no_pass2 = False + + def _set_kerberos2_legacy(self, option, opt_str, arg, parser): + self.creds2.set_kerberos_state(parse_kerberos_arg_legacy(arg, opt_str)) + + def _set_kerberos2(self, option, opt_str, arg, parser): + self.creds2.set_kerberos_state(parse_kerberos_arg(arg, opt_str)) + + def _set_simple_bind_dn2(self, option, opt_str, arg, parser): + self.creds2.set_bind_dn(arg) + + def get_credentials2(self, lp, guess=True): + """Obtain the credentials set on the command-line. + + :param lp: Loadparm object to use.
+ :param guess: Try to guess Credentials from environment + :return: Credentials object + """ + if guess: + self.creds2.guess(lp) + elif not self.creds2.get_username(): + self.creds2.set_anonymous() + + if self.no_pass2: + self.creds2.set_cmdline_callbacks() + return self.creds2 diff --git a/python/samba/gkdi.py b/python/samba/gkdi.py new file mode 100644 index 0000000..4179263 --- /dev/null +++ b/python/samba/gkdi.py @@ -0,0 +1,397 @@ +# Unix SMB/CIFS implementation. +# Copyright (C) Catalyst.Net Ltd 2023 +# +# +# This program is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with this program. If not, see <http://www.gnu.org/licenses/>. +# + +"""Group Key Distribution Service module""" + +from enum import Enum +from functools import total_ordering +from typing import Optional, Tuple + +from cryptography.hazmat.primitives import hashes + +from samba import _glue +from samba.dcerpc import gkdi, misc +from samba.ndr import ndr_pack, ndr_unpack +from samba.nt_time import NtTime, NtTimeDelta + + +uint64_max: int = 2**64 - 1 + +L1_KEY_ITERATION: int = _glue.GKDI_L1_KEY_ITERATION +L2_KEY_ITERATION: int = _glue.GKDI_L2_KEY_ITERATION +KEY_CYCLE_DURATION: NtTimeDelta = _glue.GKDI_KEY_CYCLE_DURATION +MAX_CLOCK_SKEW: NtTimeDelta = _glue.GKDI_MAX_CLOCK_SKEW + +KEY_LEN_BYTES = 64 + + +class Algorithm(Enum): + SHA1 = "SHA1" + SHA256 = "SHA256" + SHA384 = "SHA384" + SHA512 = "SHA512" + + def algorithm(self) -> hashes.HashAlgorithm: + if self is Algorithm.SHA1: + return hashes.SHA1() + + if self is Algorithm.SHA256: + return hashes.SHA256() + + if self is Algorithm.SHA384: + return hashes.SHA384() + + if self is Algorithm.SHA512: + return hashes.SHA512() + + raise RuntimeError(f"unknown hash algorithm {self}") + + def __repr__(self) -> str: + return str(self) + + @staticmethod + def from_kdf_parameters(kdf_param: Optional[bytes]) -> "Algorithm": + if not kdf_param: + return Algorithm.SHA256 # the default used by Windows.
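+ # Otherwise kdf_param holds an NDR-encoded KdfParameters blob; its + # hash_algorithm string ("SHA256", "SHA512", ...) names a member above.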
+ + kdf_parameters = ndr_unpack(gkdi.KdfParameters, kdf_param) + return Algorithm(kdf_parameters.hash_algorithm) + + +class GkidType(Enum): + DEFAULT = object() + L0_SEED_KEY = object() + L1_SEED_KEY = object() + L2_SEED_KEY = object() + + def description(self) -> str: + if self is GkidType.DEFAULT: + return "a default GKID" + + if self is GkidType.L0_SEED_KEY: + return "an L0 seed key" + + if self is GkidType.L1_SEED_KEY: + return "an L1 seed key" + + if self is GkidType.L2_SEED_KEY: + return "an L2 seed key" + + raise RuntimeError(f"unknown GKID type {self}") + + +class InvalidDerivation(Exception): + pass + + +class UndefinedStartTime(Exception): + pass + + +@total_ordering +class Gkid: + __slots__ = ["_l0_idx", "_l1_idx", "_l2_idx"] + + max_l0_idx = 0x7FFF_FFFF + + def __init__(self, l0_idx: int, l1_idx: int, l2_idx: int) -> None: + if not -1 <= l0_idx <= Gkid.max_l0_idx: + raise ValueError(f"L0 index {l0_idx} out of range") + + if not -1 <= l1_idx < L1_KEY_ITERATION: + raise ValueError(f"L1 index {l1_idx} out of range") + + if not -1 <= l2_idx < L2_KEY_ITERATION: + raise ValueError(f"L2 index {l2_idx} out of range") + + if l0_idx == -1 and l1_idx != -1: + raise ValueError("invalid combination of negative and non‐negative indices") + + if l1_idx == -1 and l2_idx != -1: + raise ValueError("invalid combination of negative and non‐negative indices") + + self._l0_idx = l0_idx + self._l1_idx = l1_idx + self._l2_idx = l2_idx + + @property + def l0_idx(self) -> int: + return self._l0_idx + + @property + def l1_idx(self) -> int: + return self._l1_idx + + @property + def l2_idx(self) -> int: + return self._l2_idx + + def gkid_type(self) -> GkidType: + if self.l0_idx == -1: + return GkidType.DEFAULT + + if self.l1_idx == -1: + return GkidType.L0_SEED_KEY + + if self.l2_idx == -1: + return GkidType.L1_SEED_KEY + + return GkidType.L2_SEED_KEY + + def wrapped_l1_idx(self) -> int: + if self.l1_idx == -1: + return L1_KEY_ITERATION + + return self.l1_idx + + def wrapped_l2_idx(self) -> int: + if self.l2_idx == -1: + return L2_KEY_ITERATION + + return self.l2_idx + + def derive_l1_seed_key(self) -> "Gkid": + gkid_type = self.gkid_type() + if ( + gkid_type is not GkidType.L0_SEED_KEY + and gkid_type is not GkidType.L1_SEED_KEY + ): + raise InvalidDerivation( + "Invalid attempt to derive an L1 seed key from" + f" {gkid_type.description()}" + ) + + if self.l1_idx == 0: + raise InvalidDerivation("No further derivation of L1 seed keys is possible") + + return Gkid(self.l0_idx, self.wrapped_l1_idx() - 1, self.l2_idx) + + def derive_l2_seed_key(self) -> "Gkid": + gkid_type = self.gkid_type() + if ( + gkid_type is not GkidType.L1_SEED_KEY + and gkid_type is not GkidType.L2_SEED_KEY + ): + raise InvalidDerivation( + f"Attempt to derive an L2 seed key from {gkid_type.description()}" + ) + + if self.l2_idx == 0: + raise InvalidDerivation("No further derivation of L2 seed keys is possible") + + return Gkid(self.l0_idx, self.l1_idx, self.wrapped_l2_idx() - 1) + + def __str__(self) -> str: + return f"Gkid({self.l0_idx}, {self.l1_idx}, {self.l2_idx})" + + def __repr__(self) -> str: + cls = type(self) + return ( + f"{cls.__qualname__}({repr(self.l0_idx)}, {repr(self.l1_idx)}," + f" {repr(self.l2_idx)})" + ) + + def __eq__(self, other: object) -> bool: + if not isinstance(other, Gkid): + return NotImplemented + + return (self.l0_idx, self.l1_idx, self.l2_idx) == ( + other.l0_idx, + other.l1_idx, + other.l2_idx, + ) + + def __lt__(self, other: object) -> bool: + if not isinstance(other, Gkid): + return NotImplemented + +
def as_tuple(gkid: Gkid) -> Tuple[int, int, int]: + l0_idx, l1_idx, l2_idx = gkid.l0_idx, gkid.l1_idx, gkid.l2_idx + + # DEFAULT is considered less than everything else, so that the + # lexical ordering requirement in [MS-GKDI] 3.1.4.1.3 (GetKey) makes + # sense. + if gkid.gkid_type() is not GkidType.DEFAULT: + # Use the wrapped indices so that L1 seed keys are considered + # greater than their children L2 seed keys, and L0 seed keys are + # considered greater than their children L1 seed keys. + l1_idx = gkid.wrapped_l1_idx() + l2_idx = gkid.wrapped_l2_idx() + + return l0_idx, l1_idx, l2_idx + + return as_tuple(self) < as_tuple(other) + + def __hash__(self) -> int: + return hash((self.l0_idx, self.l1_idx, self.l2_idx)) + + @staticmethod + def default() -> "Gkid": + return Gkid(-1, -1, -1) + + @staticmethod + def l0_seed_key(l0_idx: int) -> "Gkid": + return Gkid(l0_idx, -1, -1) + + @staticmethod + def l1_seed_key(l0_idx: int, l1_idx: int) -> "Gkid": + return Gkid(l0_idx, l1_idx, -1) + + @staticmethod + def from_nt_time(nt_time: NtTime) -> "Gkid": + l0 = nt_time // (L1_KEY_ITERATION * L2_KEY_ITERATION * KEY_CYCLE_DURATION) + l1 = ( + nt_time + % (L1_KEY_ITERATION * L2_KEY_ITERATION * KEY_CYCLE_DURATION) + // (L2_KEY_ITERATION * KEY_CYCLE_DURATION) + ) + l2 = nt_time % (L2_KEY_ITERATION * KEY_CYCLE_DURATION) // KEY_CYCLE_DURATION + + return Gkid(l0, l1, l2) + + def start_nt_time(self) -> NtTime: + gkid_type = self.gkid_type() + if gkid_type is not GkidType.L2_SEED_KEY: + raise UndefinedStartTime( + f"{gkid_type.description()} has no defined start time" + ) + + start_time = NtTime( + ( + self.l0_idx * L1_KEY_ITERATION * L2_KEY_ITERATION + + self.l1_idx * L2_KEY_ITERATION + + self.l2_idx + ) + * KEY_CYCLE_DURATION + ) + + if not 0 <= start_time <= uint64_max: + raise OverflowError(f"start time {start_time} out of range") + + return start_time + + +class SeedKeyPair: + __slots__ = ["l1_key", "l2_key", "gkid", "hash_algorithm", "root_key_id"] + + def __init__( + self, + l1_key: Optional[bytes], + l2_key: Optional[bytes], + gkid: Gkid, + hash_algorithm: Algorithm, + root_key_id: misc.GUID, + ) -> None: + if l1_key is not None and len(l1_key) != KEY_LEN_BYTES: + raise ValueError(f"L1 key ({repr(l1_key)}) must be {KEY_LEN_BYTES} bytes") + if l2_key is not None and len(l2_key) != KEY_LEN_BYTES: + raise ValueError(f"L2 key ({repr(l2_key)}) must be {KEY_LEN_BYTES} bytes") + + self.l1_key = l1_key + self.l2_key = l2_key + self.gkid = gkid + self.hash_algorithm = hash_algorithm + self.root_key_id = root_key_id + + def __str__(self) -> str: + l1_key_hex = None if self.l1_key is None else self.l1_key.hex() + l2_key_hex = None if self.l2_key is None else self.l2_key.hex() + + return ( + f"SeedKeyPair(L1Key({l1_key_hex}), L2Key({l2_key_hex}), {self.gkid}," + f" {self.root_key_id}, {self.hash_algorithm})" + ) + + def __repr__(self) -> str: + cls = type(self) + return ( + f"{cls.__qualname__}({repr(self.l1_key)}, {repr(self.l2_key)}," + f" {repr(self.gkid)}, {repr(self.hash_algorithm)}," + f" {repr(self.root_key_id)})" + ) + + def __eq__(self, other: object) -> bool: + if not isinstance(other, SeedKeyPair): + return NotImplemented + + return ( + self.l1_key, + self.l2_key, + self.gkid, + self.hash_algorithm, + self.root_key_id, + ) == ( + other.l1_key, + other.l2_key, + other.gkid, + other.hash_algorithm, + other.root_key_id, + ) + + def __hash__(self) -> int: + return hash(( + self.l1_key, + self.l2_key, + self.gkid, + self.hash_algorithm, + ndr_pack(self.root_key_id), + )) + + +class GroupKey: + __slots__ = 
["gkid", "key", "hash_algorithm", "root_key_id"] + + def __init__( + self, key: bytes, gkid: Gkid, hash_algorithm: Algorithm, root_key_id: misc.GUID + ) -> None: + if key is not None and len(key) != KEY_LEN_BYTES: + raise ValueError(f"Key ({repr(key)}) must be {KEY_LEN_BYTES} bytes") + + self.key = key + self.gkid = gkid + self.hash_algorithm = hash_algorithm + self.root_key_id = root_key_id + + def __str__(self) -> str: + return ( + f"GroupKey(Key({self.key.hex()}), {self.gkid}, {self.hash_algorithm}," + f" {self.root_key_id})" + ) + + def __repr__(self) -> str: + cls = type(self) + return ( + f"{cls.__qualname__}({repr(self.key)}, {repr(self.gkid)}," + f" {repr(self.hash_algorithm)}, {repr(self.root_key_id)})" + ) + + def __eq__(self, other: object) -> bool: + if not isinstance(other, GroupKey): + return NotImplemented + + return (self.key, self.gkid, self.hash_algorithm, self.root_key_id) == ( + other.key, + other.gkid, + other.hash_algorithm, + other.root_key_id, + ) + + def __hash__(self) -> int: + return hash( + (self.key, self.gkid, self.hash_algorithm, ndr_pack(self.root_key_id)) + ) diff --git a/python/samba/gp/__init__.py b/python/samba/gp/__init__.py new file mode 100644 index 0000000..af6e639 --- /dev/null +++ b/python/samba/gp/__init__.py @@ -0,0 +1,17 @@ +# Unix SMB/CIFS implementation. +# Copyright (C) David Mulder 2023 +# +# This program is free software; you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation; either version 3 of the License, or +# (at your option) any later version. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with this program. If not, see . + +from samba.gp.gpclass import get_gpo_list diff --git a/python/samba/gp/gp_centrify_crontab_ext.py b/python/samba/gp/gp_centrify_crontab_ext.py new file mode 100644 index 0000000..b1055a1 --- /dev/null +++ b/python/samba/gp/gp_centrify_crontab_ext.py @@ -0,0 +1,135 @@ +# gp_centrify_crontab_ext samba gpo policy +# Copyright (C) David Mulder 2022 +# +# This program is free software; you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation; either version 3 of the License, or +# (at your option) any later version. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with this program. If not, see . + +import os +from samba.gp.gpclass import gp_pol_ext, drop_privileges, gp_file_applier, \ + gp_misc_applier +from tempfile import NamedTemporaryFile +from samba.gp.gp_scripts_ext import fetch_crontab, install_user_crontab + +intro = ''' +### autogenerated by samba +# +# This file is generated by the gp_centrify_crontab_ext Group Policy +# Client Side Extension. To modify the contents of this file, +# modify the appropriate Group Policy objects which apply +# to this machine. DO NOT MODIFY THIS FILE DIRECTLY. 
+# + +''' +end = ''' +### autogenerated by samba ### +''' + +class gp_centrify_crontab_ext(gp_pol_ext, gp_file_applier): + def __str__(self): + return 'Centrify/CrontabEntries' + + def process_group_policy(self, deleted_gpo_list, changed_gpo_list, + cdir=None): + for guid, settings in deleted_gpo_list: + if str(self) in settings: + for attribute, script in settings[str(self)].items(): + self.unapply(guid, attribute, script) + + for gpo in changed_gpo_list: + if gpo.file_sys_path: + section = \ + 'Software\\Policies\\Centrify\\UnixSettings\\CrontabEntries' + pol_file = 'MACHINE/Registry.pol' + path = os.path.join(gpo.file_sys_path, pol_file) + pol_conf = self.parse(path) + if not pol_conf: + continue + entries = [] + for e in pol_conf.entries: + if e.keyname == section and e.data.strip(): + entries.append(e.data) + def applier_func(entries): + cron_dir = '/etc/cron.d' if not cdir else cdir + with NamedTemporaryFile(prefix='gp_', mode="w+", + delete=False, dir=cron_dir) as f: + contents = intro + for entry in entries: + contents += '%s\n' % entry + contents += end + f.write(contents) + return [f.name] + attribute = self.generate_attribute(gpo.name) + value_hash = self.generate_value_hash(*entries) + self.apply(gpo.name, attribute, value_hash, applier_func, + entries) + + # Remove scripts for this GPO which are no longer applied + self.clean(gpo.name, keep=attribute) + + def rsop(self, gpo, target='MACHINE'): + output = {} + section = 'Software\\Policies\\Centrify\\UnixSettings\\CrontabEntries' + pol_file = '%s/Registry.pol' % target + if gpo.file_sys_path: + path = os.path.join(gpo.file_sys_path, pol_file) + pol_conf = self.parse(path) + if not pol_conf: + return output + for e in pol_conf.entries: + if e.keyname == section and e.data.strip(): + if str(self) not in output.keys(): + output[str(self)] = [] + output[str(self)].append(e.data) + return output + +class gp_user_centrify_crontab_ext(gp_centrify_crontab_ext, gp_misc_applier): + def unapply(self, guid, attribute, entry): + others, entries = fetch_crontab(self.username) + if entry in entries: + entries.remove(entry) + install_user_crontab(self.username, others, entries) + self.cache_remove_attribute(guid, attribute) + + def apply(self, guid, attribute, entry): + old_val = self.cache_get_attribute_value(guid, attribute) + others, entries = fetch_crontab(self.username) + if not old_val or entry not in entries: + entries.append(entry) + install_user_crontab(self.username, others, entries) + self.cache_add_attribute(guid, attribute, entry) + + def process_group_policy(self, deleted_gpo_list, changed_gpo_list): + for guid, settings in deleted_gpo_list: + if str(self) in settings: + for attribute, entry in settings[str(self)].items(): + self.unapply(guid, attribute, entry) + + for gpo in changed_gpo_list: + if gpo.file_sys_path: + section = \ + 'Software\\Policies\\Centrify\\UnixSettings\\CrontabEntries' + pol_file = 'USER/Registry.pol' + path = os.path.join(gpo.file_sys_path, pol_file) + pol_conf = drop_privileges('root', self.parse, path) + if not pol_conf: + continue + attrs = [] + for e in pol_conf.entries: + if e.keyname == section and e.data.strip(): + attribute = self.generate_attribute(e.data) + attrs.append(attribute) + self.apply(gpo.name, attribute, e.data) + self.clean(gpo.name, keep=attrs) + + def rsop(self, gpo): + return super().rsop(gpo, target='USER') diff --git a/python/samba/gp/gp_centrify_sudoers_ext.py b/python/samba/gp/gp_centrify_sudoers_ext.py new file mode 100644 index 0000000..4752f1e --- /dev/null +++ 
b/python/samba/gp/gp_centrify_sudoers_ext.py @@ -0,0 +1,80 @@ +# gp_centrify_sudoers_ext samba gpo policy +# Copyright (C) David Mulder 2022 +# +# This program is free software; you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation; either version 3 of the License, or +# (at your option) any later version. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with this program. If not, see . + +import os +from samba.gp.gpclass import gp_pol_ext, gp_file_applier +from samba.gp.gp_sudoers_ext import sudo_applier_func + +def ext_enabled(entries): + section = 'Software\\Policies\\Centrify\\UnixSettings' + for e in entries: + if e.keyname == section and e.valuename == 'sudo.enabled': + return e.data == 1 + return False + +class gp_centrify_sudoers_ext(gp_pol_ext, gp_file_applier): + def __str__(self): + return 'Centrify/Sudo Rights' + + def process_group_policy(self, deleted_gpo_list, changed_gpo_list, + sdir='/etc/sudoers.d'): + for guid, settings in deleted_gpo_list: + if str(self) in settings: + for attribute, sudoers in settings[str(self)].items(): + self.unapply(guid, attribute, sudoers) + + for gpo in changed_gpo_list: + if gpo.file_sys_path: + section = 'Software\\Policies\\Centrify\\UnixSettings\\SuDo' + pol_file = 'MACHINE/Registry.pol' + path = os.path.join(gpo.file_sys_path, pol_file) + pol_conf = self.parse(path) + if not pol_conf or not ext_enabled(pol_conf.entries): + continue + sudo_entries = [] + for e in pol_conf.entries: + if e.keyname == section and e.data.strip(): + if '**delvals.' in e.valuename: + continue + sudo_entries.append(e.data) + # Each GPO applies only one set of sudoers, in a + # set of files, so the attribute does not need uniqueness. + attribute = self.generate_attribute(gpo.name, *sudo_entries) + # The value hash is generated from the sudo_entries, ensuring + # any changes to this GPO will cause the files to be rewritten. + value_hash = self.generate_value_hash(*sudo_entries) + self.apply(gpo.name, attribute, value_hash, sudo_applier_func, + sdir, sudo_entries) + # Cleanup any old entries that are no longer part of the policy + self.clean(gpo.name, keep=[attribute]) + + def rsop(self, gpo): + output = {} + section = 'Software\\Policies\\Centrify\\UnixSettings\\SuDo' + pol_file = 'MACHINE/Registry.pol' + if gpo.file_sys_path: + path = os.path.join(gpo.file_sys_path, pol_file) + pol_conf = self.parse(path) + if not pol_conf: + return output + for e in pol_conf.entries: + if e.keyname == section and e.data.strip(): + if '**delvals.' 
in e.valuename:
+                        continue
+                    if str(self) not in output.keys():
+                        output[str(self)] = []
+                    output[str(self)].append(e.data)
+        return output
diff --git a/python/samba/gp/gp_cert_auto_enroll_ext.py b/python/samba/gp/gp_cert_auto_enroll_ext.py
new file mode 100644
index 0000000..9b743cb
--- /dev/null
+++ b/python/samba/gp/gp_cert_auto_enroll_ext.py
@@ -0,0 +1,572 @@
+# gp_cert_auto_enroll_ext samba group policy
+# Copyright (C) David Mulder 2021
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+
+import os
+import operator
+import requests
+from samba.gp.gpclass import gp_pol_ext, gp_applier, GPOSTATE
+from samba import Ldb
+from ldb import SCOPE_SUBTREE, SCOPE_BASE
+from samba.auth import system_session
+from samba.gp.gpclass import get_dc_hostname
+import base64
+from shutil import which
+from subprocess import Popen, PIPE
+import re
+import json
+from samba.gp.util.logging import log
+import struct
+try:
+    from cryptography.hazmat.primitives.serialization.pkcs7 import \
+        load_der_pkcs7_certificates
+except ModuleNotFoundError:
+    def load_der_pkcs7_certificates(x): return []
+    log.error('python cryptography missing pkcs7 support. '
+              'Certificate chain parsing will fail')
+from cryptography.hazmat.primitives.serialization import Encoding
+from cryptography.x509 import load_der_x509_certificate
+from cryptography.hazmat.backends import default_backend
+from samba.common import get_string
+
+cert_wrap = b"""
+-----BEGIN CERTIFICATE-----
+%s
+-----END CERTIFICATE-----"""
+endpoint_re = '(https|HTTPS)://(?P<server>[a-zA-Z0-9.-]+)/ADPolicyProvider' + \
+              '_CEP_(?P<auth>[a-zA-Z]+)/service.svc/CEP'
+
+global_trust_dirs = ['/etc/pki/trust/anchors',           # SUSE
+                     '/etc/pki/ca-trust/source/anchors', # RHEL/Fedora
+                     '/usr/local/share/ca-certificates'] # Debian/Ubuntu
+
+def octet_string_to_objectGUID(data):
+    """Convert an octet string to an objectGUID."""
+    return '%s-%s-%s-%s-%s' % ('%02x' % struct.unpack('<L', data[0:4])[0],
+                               '%02x' % struct.unpack('<H', data[4:6])[0],
+                               '%02x' % struct.unpack('<H', data[6:8])[0],
+                               '%02x' % struct.unpack('>H', data[8:10])[0],
+                               '%02x%02x' % struct.unpack('>HL', data[10:]))
+
+
+def group_and_sort_end_point_information(end_point_information):
+    """Group and Sort End Point Information.
+
+    [MS-CAESO] 4.4.5.3.2.3
+    In this step autoenrollment processes the end point information by grouping
+    it by CEP ID and sorting in the order with which it will use the end point
+    to access the CEP information.
+    """
+    # Create groups of the CertificateEnrollmentPolicyEndPoint instances that
+    # have the same value of the EndPoint.PolicyID datum.
+    end_point_groups = {}
+    for e in end_point_information:
+        if e['PolicyID'] not in end_point_groups.keys():
+            end_point_groups[e['PolicyID']] = []
+        end_point_groups[e['PolicyID']].append(e)
+
+    # Sort each group by following these rules:
+    for end_point_group in end_point_groups.values():
+        # Sort the CertificateEnrollmentPolicyEndPoint instances in ascending
+        # order based on the EndPoint.Cost value.
+ end_point_group.sort(key=lambda e: e['Cost']) + + # For instances that have the same EndPoint.Cost: + cost_list = [e['Cost'] for e in end_point_group] + costs = set(cost_list) + for cost in costs: + i = cost_list.index(cost) + j = len(cost_list)-operator.indexOf(reversed(cost_list), cost)-1 + if i == j: + continue + + # Sort those that have EndPoint.Authentication equal to Kerberos + # first. Then sort those that have EndPoint.Authentication equal to + # Anonymous. The rest of the CertificateEnrollmentPolicyEndPoint + # instances follow in an arbitrary order. + def sort_auth(e): + # 0x2 - Kerberos + if e['AuthFlags'] == 0x2: + return 0 + # 0x1 - Anonymous + elif e['AuthFlags'] == 0x1: + return 1 + else: + return 2 + end_point_group[i:j+1] = sorted(end_point_group[i:j+1], + key=sort_auth) + return list(end_point_groups.values()) + +def obtain_end_point_information(entries): + """Obtain End Point Information. + + [MS-CAESO] 4.4.5.3.2.2 + In this step autoenrollment initializes the + CertificateEnrollmentPolicyEndPoints table. + """ + end_point_information = {} + section = 'Software\\Policies\\Microsoft\\Cryptography\\PolicyServers\\' + for e in entries: + if not e.keyname.startswith(section): + continue + name = e.keyname.replace(section, '') + if name not in end_point_information.keys(): + end_point_information[name] = {} + end_point_information[name][e.valuename] = e.data + for ca in end_point_information.values(): + m = re.match(endpoint_re, ca['URL']) + if m: + name = '%s-CA' % m.group('server').replace('.', '-') + ca['name'] = name + ca['hostname'] = m.group('server') + ca['auth'] = m.group('auth') + elif ca['URL'].lower() != 'ldap:': + edata = { 'endpoint': ca['URL'] } + log.error('Failed to parse the endpoint', edata) + return {} + end_point_information = \ + group_and_sort_end_point_information(end_point_information.values()) + return end_point_information + +def fetch_certification_authorities(ldb): + """Initialize CAs. 
+ + [MS-CAESO] 4.4.5.3.1.2 + """ + result = [] + basedn = ldb.get_default_basedn() + # Autoenrollment MUST do an LDAP search for the CA information + # (pKIEnrollmentService) objects under the following container: + dn = 'CN=Enrollment Services,CN=Public Key Services,CN=Services,CN=Configuration,%s' % basedn + attrs = ['cACertificate', 'cn', 'dNSHostName'] + expr = '(objectClass=pKIEnrollmentService)' + res = ldb.search(dn, SCOPE_SUBTREE, expr, attrs) + if len(res) == 0: + return result + for es in res: + data = { 'name': get_string(es['cn'][0]), + 'hostname': get_string(es['dNSHostName'][0]), + 'cACertificate': get_string(base64.b64encode(es['cACertificate'][0])) + } + result.append(data) + return result + +def fetch_template_attrs(ldb, name, attrs=None): + if attrs is None: + attrs = ['msPKI-Minimal-Key-Size'] + basedn = ldb.get_default_basedn() + dn = 'CN=Certificate Templates,CN=Public Key Services,CN=Services,CN=Configuration,%s' % basedn + expr = '(cn=%s)' % name + res = ldb.search(dn, SCOPE_SUBTREE, expr, attrs) + if len(res) == 1 and 'msPKI-Minimal-Key-Size' in res[0]: + return dict(res[0]) + else: + return {'msPKI-Minimal-Key-Size': ['2048']} + +def format_root_cert(cert): + return cert_wrap % re.sub(b"(.{64})", b"\\1\n", cert.encode(), 0, re.DOTALL) + +def find_cepces_submit(): + certmonger_dirs = [os.environ.get("PATH"), '/usr/lib/certmonger', + '/usr/libexec/certmonger'] + return which('cepces-submit', path=':'.join(certmonger_dirs)) + +def get_supported_templates(server): + cepces_submit = find_cepces_submit() + if not cepces_submit: + log.error('Failed to find cepces-submit') + return [] + + env = os.environ + env['CERTMONGER_OPERATION'] = 'GET-SUPPORTED-TEMPLATES' + p = Popen([cepces_submit, '--server=%s' % server, '--auth=Kerberos'], + env=env, stdout=PIPE, stderr=PIPE) + out, err = p.communicate() + if p.returncode != 0: + data = {'Error': err.decode()} + log.error('Failed to fetch the list of supported templates.', data) + return out.strip().split() + + +def getca(ca, url, trust_dir): + """Fetch Certificate Chain from the CA.""" + root_cert = os.path.join(trust_dir, '%s.crt' % ca['name']) + root_certs = [] + + try: + r = requests.get(url=url, params={'operation': 'GetCACert', + 'message': 'CAIdentifier'}) + except requests.exceptions.ConnectionError: + log.warn('Could not connect to Network Device Enrollment Service.') + r = None + if r is None or r.content == b'' or r.headers['Content-Type'] == 'text/html': + log.warn('Unable to fetch root certificates (requires NDES).') + if 'cACertificate' in ca: + log.warn('Installing the server certificate only.') + der_certificate = base64.b64decode(ca['cACertificate']) + try: + cert = load_der_x509_certificate(der_certificate) + except TypeError: + cert = load_der_x509_certificate(der_certificate, + default_backend()) + cert_data = cert.public_bytes(Encoding.PEM) + with open(root_cert, 'wb') as w: + w.write(cert_data) + root_certs.append(root_cert) + return root_certs + + if r.headers['Content-Type'] == 'application/x-x509-ca-cert': + # Older versions of load_der_x509_certificate require a backend param + try: + cert = load_der_x509_certificate(r.content) + except TypeError: + cert = load_der_x509_certificate(r.content, default_backend()) + cert_data = cert.public_bytes(Encoding.PEM) + with open(root_cert, 'wb') as w: + w.write(cert_data) + root_certs.append(root_cert) + elif r.headers['Content-Type'] == 'application/x-x509-ca-ra-cert': + certs = load_der_pkcs7_certificates(r.content) + for i in range(0, len(certs)): + cert = 
certs[i].public_bytes(Encoding.PEM) + filename, extension = root_cert.rsplit('.', 1) + dest = '%s.%d.%s' % (filename, i, extension) + with open(dest, 'wb') as w: + w.write(cert) + root_certs.append(dest) + else: + log.warn('getca: Wrong (or missing) MIME content type') + + return root_certs + + +def find_global_trust_dir(): + """Return the global trust dir using known paths from various Linux distros.""" + for trust_dir in global_trust_dirs: + if os.path.isdir(trust_dir): + return trust_dir + return global_trust_dirs[0] + +def update_ca_command(): + """Return the command to update the CA trust store.""" + return which('update-ca-certificates') or which('update-ca-trust') + +def changed(new_data, old_data): + """Return True if any key present in both dicts has changed.""" + return any((new_data[k] != old_data[k] if k in old_data else False) + for k in new_data.keys()) + +def cert_enroll(ca, ldb, trust_dir, private_dir, auth='Kerberos'): + """Install the root certificate chain.""" + data = dict({'files': [], 'templates': []}, **ca) + url = 'http://%s/CertSrv/mscep/mscep.dll/pkiclient.exe?' % ca['hostname'] + + log.info("Try to get root or server certificates") + + root_certs = getca(ca, url, trust_dir) + data['files'].extend(root_certs) + global_trust_dir = find_global_trust_dir() + for src in root_certs: + # Symlink the certs to global trust dir + dst = os.path.join(global_trust_dir, os.path.basename(src)) + try: + os.symlink(src, dst) + data['files'].append(dst) + log.info("Created symlink: %s -> %s" % (src, dst)) + except PermissionError: + log.warn('Failed to symlink root certificate to the' + ' admin trust anchors') + except FileNotFoundError: + log.warn('Failed to symlink root certificate to the' + ' admin trust anchors.' + ' The directory was not found', global_trust_dir) + except FileExistsError: + # If we're simply downloading a renewed cert, the symlink + # already exists. Ignore the FileExistsError. Preserve the + # existing symlink in the unapply data. 
+            data['files'].append(dst)
+
+    update = update_ca_command()
+    log.info("Running %s" % (update))
+    if update is not None:
+        ret = Popen([update]).wait()
+        if ret != 0:
+            log.error('Failed to run %s' % (update))
+
+    # Setup Certificate Auto Enrollment
+    getcert = which('getcert')
+    cepces_submit = find_cepces_submit()
+    if getcert is not None and cepces_submit is not None:
+        p = Popen([getcert, 'add-ca', '-c', ca['name'], '-e',
+                   '%s --server=%s --auth=%s' % (cepces_submit,
+                                                 ca['hostname'], auth)],
+                  stdout=PIPE, stderr=PIPE)
+        out, err = p.communicate()
+        log.debug(out.decode())
+        if p.returncode != 0:
+            if p.returncode == 2:
+                log.info('The CA [%s] already exists' % ca['name'])
+            else:
+                edata = {'Error': err.decode(), 'CA': ca['name']}
+                log.error('Failed to add Certificate Authority', edata)
+
+        supported_templates = get_supported_templates(ca['hostname'])
+        for template in supported_templates:
+            attrs = fetch_template_attrs(ldb, template)
+            nickname = '%s.%s' % (ca['name'], template.decode())
+            keyfile = os.path.join(private_dir, '%s.key' % nickname)
+            certfile = os.path.join(trust_dir, '%s.crt' % nickname)
+            p = Popen([getcert, 'request', '-c', ca['name'],
+                       '-T', template.decode(),
+                       '-I', nickname, '-k', keyfile, '-f', certfile,
+                       '-g', attrs['msPKI-Minimal-Key-Size'][0]],
+                      stdout=PIPE, stderr=PIPE)
+            out, err = p.communicate()
+            log.debug(out.decode())
+            if p.returncode != 0:
+                if p.returncode == 2:
+                    log.info('The template [%s] already exists' % (nickname))
+                else:
+                    edata = {'Error': err.decode(), 'Certificate': nickname}
+                    log.error('Failed to request certificate', edata)
+
+            data['files'].extend([keyfile, certfile])
+            data['templates'].append(nickname)
+        if update is not None:
+            ret = Popen([update]).wait()
+            if ret != 0:
+                log.error('Failed to run %s' % (update))
+    else:
+        log.warn('certmonger and cepces must be installed for ' +
+                 'certificate auto enrollment to work')
+    return json.dumps(data)
+
+class gp_cert_auto_enroll_ext(gp_pol_ext, gp_applier):
+    def __str__(self):
+        return r'Cryptography\AutoEnrollment'
+
+    def unapply(self, guid, attribute, value):
+        ca_cn = base64.b64decode(attribute)
+        data = json.loads(value)
+        getcert = which('getcert')
+        if getcert is not None:
+            Popen([getcert, 'remove-ca', '-c', ca_cn]).wait()
+            for nickname in data['templates']:
+                Popen([getcert, 'stop-tracking', '-i', nickname]).wait()
+        for f in data['files']:
+            if os.path.exists(f):
+                os.unlink(f)
+        self.cache_remove_attribute(guid, attribute)
+
+    def apply(self, guid, ca, applier_func, *args, **kwargs):
+        attribute = base64.b64encode(ca['name'].encode()).decode()
+        # If the policy has changed, unapply, then apply new policy
+        old_val = self.cache_get_attribute_value(guid, attribute)
+        old_data = json.loads(old_val) if old_val is not None else {}
+        templates = ['%s.%s' % (ca['name'], t.decode()) for t in get_supported_templates(ca['hostname'])] \
+            if old_val is not None else []
+        new_data = { 'templates': templates, **ca }
+        if changed(new_data, old_data) or self.cache_get_apply_state() == GPOSTATE.ENFORCE:
+            self.unapply(guid, attribute, old_val)
+        # If policy is already applied and unchanged, skip application
+        if old_val is not None and not changed(new_data, old_data) and \
+                self.cache_get_apply_state() != GPOSTATE.ENFORCE:
+            return
+
+        # Apply the policy and log the changes
+        data = applier_func(*args, **kwargs)
+        self.cache_add_attribute(guid, attribute, data)
+
+    def process_group_policy(self, deleted_gpo_list, changed_gpo_list,
+                             trust_dir=None,
private_dir=None):
+        if trust_dir is None:
+            trust_dir = self.lp.cache_path('certs')
+        if private_dir is None:
+            private_dir = self.lp.private_path('certs')
+        if not os.path.exists(trust_dir):
+            os.mkdir(trust_dir, mode=0o755)
+        if not os.path.exists(private_dir):
+            os.mkdir(private_dir, mode=0o700)
+
+        for guid, settings in deleted_gpo_list:
+            if str(self) in settings:
+                for ca_cn_enc, data in settings[str(self)].items():
+                    self.unapply(guid, ca_cn_enc, data)
+
+        for gpo in changed_gpo_list:
+            if gpo.file_sys_path:
+                section = r'Software\Policies\Microsoft\Cryptography\AutoEnrollment'
+                pol_file = 'MACHINE/Registry.pol'
+                path = os.path.join(gpo.file_sys_path, pol_file)
+                pol_conf = self.parse(path)
+                if not pol_conf:
+                    continue
+                for e in pol_conf.entries:
+                    if e.keyname == section and e.valuename == 'AEPolicy':
+                        # This policy applies as specified in [MS-CAESO] 4.4.5.1
+                        if e.data & 0x8000:
+                            continue # The policy is disabled
+                        enroll = e.data & 0x1 == 0x1
+                        manage = e.data & 0x2 == 0x2
+                        retrieve_pending = e.data & 0x4 == 0x4
+                        if enroll:
+                            ca_names = self.__enroll(gpo.name,
+                                                     pol_conf.entries,
+                                                     trust_dir, private_dir)
+
+                            # Cleanup any old CAs that have been removed
+                            ca_attrs = [base64.b64encode(n.encode()).decode()
+                                        for n in ca_names]
+                            self.clean(gpo.name, keep=ca_attrs)
+                        else:
+                            # If enrollment has been disabled for this GPO,
+                            # remove any existing policy
+                            ca_attrs = \
+                                self.cache_get_all_attribute_values(gpo.name)
+                            self.clean(gpo.name, remove=list(ca_attrs.keys()))
+
+    def __read_cep_data(self, guid, ldb, end_point_information,
+                        trust_dir, private_dir):
+        """Read CEP Data.
+
+        [MS-CAESO] 4.4.5.3.2.4
+        In this step autoenrollment initializes instances of the
+        CertificateEnrollmentPolicy by accessing end points associated with CEP
+        groups created in the previous step.
+        """
+        # For each group created in the previous step:
+        for end_point_group in end_point_information:
+            # Pick an arbitrary instance of the
+            # CertificateEnrollmentPolicyEndPoint from the group
+            e = end_point_group[0]
+
+            # If this instance does not have the AutoEnrollmentEnabled flag set
+            # in the EndPoint.Flags, continue with the next group.
+            if not e['Flags'] & 0x10:
+                continue
+
+            # If the current group contains a
+            # CertificateEnrollmentPolicyEndPoint instance with EndPoint.URI
+            # equal to "LDAP":
+            if any([e['URL'] == 'LDAP:' for e in end_point_group]):
+                # Perform an LDAP search to read the value of the objectGuid
+                # attribute of the root object of the forest root domain NC. If
+                # any errors are encountered, continue with the next group.
+                res = ldb.search('', SCOPE_BASE, '(objectClass=*)',
+                                 ['rootDomainNamingContext'])
+                if len(res) != 1:
+                    continue
+                res2 = ldb.search(res[0]['rootDomainNamingContext'][0],
+                                  SCOPE_BASE, '(objectClass=*)',
+                                  ['objectGUID'])
+                if len(res2) != 1:
+                    continue
+
+                # Compare the value read in the previous step to the
+                # EndPoint.PolicyId datum CertificateEnrollmentPolicyEndPoint
+                # instance. If the values do not match, continue with the next
+                # group.
+                objectGUID = '{%s}' % \
+                    octet_string_to_objectGUID(res2[0]['objectGUID'][0]).upper()
+                if objectGUID != e['PolicyID']:
+                    continue
+
+            # For each CertificateEnrollmentPolicyEndPoint instance for that
+            # group:
+            ca_names = []
+            for ca in end_point_group:
+                # If EndPoint.URI equals "LDAP":
+                if ca['URL'] == 'LDAP:':
+                    # This is a basic configuration.
+ cas = fetch_certification_authorities(ldb) + for _ca in cas: + self.apply(guid, _ca, cert_enroll, _ca, ldb, trust_dir, + private_dir) + ca_names.append(_ca['name']) + # If EndPoint.URI starts with "HTTPS//": + elif ca['URL'].lower().startswith('https://'): + self.apply(guid, ca, cert_enroll, ca, ldb, trust_dir, + private_dir, auth=ca['auth']) + ca_names.append(ca['name']) + else: + edata = { 'endpoint': ca['URL'] } + log.error('Unrecognized endpoint', edata) + return ca_names + + def __enroll(self, guid, entries, trust_dir, private_dir): + url = 'ldap://%s' % get_dc_hostname(self.creds, self.lp) + ldb = Ldb(url=url, session_info=system_session(), + lp=self.lp, credentials=self.creds) + + ca_names = [] + end_point_information = obtain_end_point_information(entries) + if len(end_point_information) > 0: + ca_names.extend(self.__read_cep_data(guid, ldb, + end_point_information, + trust_dir, private_dir)) + else: + cas = fetch_certification_authorities(ldb) + for ca in cas: + self.apply(guid, ca, cert_enroll, ca, ldb, trust_dir, + private_dir) + ca_names.append(ca['name']) + return ca_names + + def rsop(self, gpo): + output = {} + pol_file = 'MACHINE/Registry.pol' + section = r'Software\Policies\Microsoft\Cryptography\AutoEnrollment' + if gpo.file_sys_path: + path = os.path.join(gpo.file_sys_path, pol_file) + pol_conf = self.parse(path) + if not pol_conf: + return output + for e in pol_conf.entries: + if e.keyname == section and e.valuename == 'AEPolicy': + enroll = e.data & 0x1 == 0x1 + if e.data & 0x8000 or not enroll: + continue + output['Auto Enrollment Policy'] = {} + url = 'ldap://%s' % get_dc_hostname(self.creds, self.lp) + ldb = Ldb(url=url, session_info=system_session(), + lp=self.lp, credentials=self.creds) + end_point_information = \ + obtain_end_point_information(pol_conf.entries) + cas = fetch_certification_authorities(ldb) + if len(end_point_information) > 0: + cas2 = [ep for sl in end_point_information for ep in sl] + if any([ca['URL'] == 'LDAP:' for ca in cas2]): + cas.extend(cas2) + else: + cas = cas2 + for ca in cas: + if 'URL' in ca and ca['URL'] == 'LDAP:': + continue + policy = 'Auto Enrollment Policy' + cn = ca['name'] + if policy not in output: + output[policy] = {} + output[policy][cn] = {} + if 'cACertificate' in ca: + output[policy][cn]['CA Certificate'] = \ + format_root_cert(ca['cACertificate']).decode() + output[policy][cn]['Auto Enrollment Server'] = \ + ca['hostname'] + supported_templates = \ + get_supported_templates(ca['hostname']) + output[policy][cn]['Templates'] = \ + [t.decode() for t in supported_templates] + return output diff --git a/python/samba/gp/gp_chromium_ext.py b/python/samba/gp/gp_chromium_ext.py new file mode 100644 index 0000000..5e54f0f --- /dev/null +++ b/python/samba/gp/gp_chromium_ext.py @@ -0,0 +1,473 @@ +# gp_chromium_ext samba gpo policy +# Copyright (C) David Mulder 2021 +# +# This program is free software; you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation; either version 3 of the License, or +# (at your option) any later version. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with this program. If not, see . 
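Editor's sketch for the gp_chromium_ext module that follows: it converts Registry.pol entries under Software\Policies\Google\Chrome into the JSON policy files Chromium loads. This illustration is not part of the patch; the policy names and values are invented examples, but the split and output locations come from the code below.

    # Hypothetical input/output of convert_pol_to_json() defined below:
    # entries under '...\\Chrome\\Recommended' land in the recommended
    # dict, everything else under '...\\Chrome' in the managed dict.
    managed = {'ShowHomeButton': True}  # REG_DWORD 1 becomes Python True
    recommended = {'HomepageLocation': 'https://example.com'}
    # apply() then writes each dict via json.dump() to a NamedTemporaryFile
    # with a .json suffix under /etc/chromium/policies/managed
    # (or .../recommended), chmod 0o644.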
+ +import os +import json +from samba.gp.gpclass import gp_pol_ext, gp_file_applier +from samba.dcerpc import misc +from samba.common import get_string +from samba.gp.util.logging import log +from tempfile import NamedTemporaryFile + +def parse_entry_data(name, e): + dict_entries = ['VirtualKeyboardFeatures', + 'DeviceArcDataSnapshotHours', + 'RequiredClientCertificateForDevice', + 'RequiredClientCertificateForUser', + 'RegisteredProtocolHandlers', + 'WebUsbAllowDevicesForUrls', + 'DeviceAutoUpdateTimeRestrictions', + 'DeviceUpdateStagingSchedule', + 'DeviceMinimumVersion', + 'DeviceDisplayResolution', + 'ExtensionSettings', + 'KerberosAccounts', + 'NetworkFileSharesPreconfiguredShares', + 'NetworkThrottlingEnabled', + 'TPMFirmwareUpdateSettings', + 'DeviceOffHours', + 'ParentAccessCodeConfig', + 'PerAppTimeLimits', + 'PerAppTimeLimitsWhitelist', + 'PerAppTimeLimitsAllowlist', + 'UsageTimeLimit', + 'PluginVmImage', + 'DeviceLoginScreenPowerManagement', + 'PowerManagementIdleSettings', + 'ScreenLockDelays', + 'ScreenBrightnessPercent', + 'DevicePowerPeakShiftDayConfig', + 'DeviceAdvancedBatteryChargeModeDayConfig', + 'PrintingPaperSizeDefault', + 'AutoLaunchProtocolsFromOrigins', + 'BrowsingDataLifetime', + 'DataLeakPreventionRulesList', + 'DeviceLoginScreenWebUsbAllowDevicesForUrls', + 'DeviceScheduledUpdateCheck', + 'KeyPermissions', + 'ManagedBookmarks', + 'ManagedConfigurationPerOrigin', + 'ProxySettings', + 'SystemProxySettings', + 'WebAppInstallForceList'] + bools = ['ShowAccessibilityOptionsInSystemTrayMenu', + 'LargeCursorEnabled', + 'SpokenFeedbackEnabled', + 'HighContrastEnabled', + 'VirtualKeyboardEnabled', + 'StickyKeysEnabled', + 'KeyboardDefaultToFunctionKeys', + 'DictationEnabled', + 'SelectToSpeakEnabled', + 'KeyboardFocusHighlightEnabled', + 'CursorHighlightEnabled', + 'CaretHighlightEnabled', + 'MonoAudioEnabled', + 'AccessibilityShortcutsEnabled', + 'AutoclickEnabled', + 'DeviceLoginScreenDefaultLargeCursorEnabled', + 'DeviceLoginScreenDefaultSpokenFeedbackEnabled', + 'DeviceLoginScreenDefaultHighContrastEnabled', + 'DeviceLoginScreenDefaultVirtualKeyboardEnabled', + 'DeviceLoginScreenLargeCursorEnabled', + 'DeviceLoginScreenSpokenFeedbackEnabled', + 'DeviceLoginScreenHighContrastEnabled', + 'DeviceLoginScreenVirtualKeyboardEnabled', + 'DeviceLoginScreenDictationEnabled', + 'DeviceLoginScreenSelectToSpeakEnabled', + 'DeviceLoginScreenCursorHighlightEnabled', + 'DeviceLoginScreenCaretHighlightEnabled', + 'DeviceLoginScreenMonoAudioEnabled', + 'DeviceLoginScreenAutoclickEnabled', + 'DeviceLoginScreenStickyKeysEnabled', + 'DeviceLoginScreenKeyboardFocusHighlightEnabled', + 'DeviceLoginScreenShowOptionsInSystemTrayMenu', + 'DeviceLoginScreenAccessibilityShortcutsEnabled', + 'FloatingAccessibilityMenuEnabled', + 'ArcEnabled', + 'UnaffiliatedArcAllowed', + 'AppRecommendationZeroStateEnabled', + 'DeviceBorealisAllowed', + 'UserBorealisAllowed', + 'SystemUse24HourClock', + 'DefaultSearchProviderEnabled', + 'ChromeOsReleaseChannelDelegated', + 'DeviceAutoUpdateDisabled', + 'DeviceAutoUpdateP2PEnabled', + 'DeviceUpdateHttpDownloadsEnabled', + 'RebootAfterUpdate', + 'BlockExternalExtensions', + 'VoiceInteractionContextEnabled', + 'VoiceInteractionHotwordEnabled', + 'EnableMediaRouter', + 'ShowCastIconInToolbar', + 'DriveDisabled', + 'DriveDisabledOverCellular', + 'DisableAuthNegotiateCnameLookup', + 'EnableAuthNegotiatePort', + 'BasicAuthOverHttpEnabled', + 'AuthNegotiateDelegateByKdcPolicy', + 'AllowCrossOriginAuthPrompt', + 'NtlmV2Enabled', + 'IntegratedWebAuthenticationAllowed', 
+ 'BrowserSwitcherEnabled', + 'BrowserSwitcherKeepLastChromeTab', + 'BrowserSwitcherUseIeSitelist', + 'VirtualMachinesAllowed', + 'CrostiniAllowed', + 'DeviceUnaffiliatedCrostiniAllowed', + 'CrostiniExportImportUIAllowed', + 'CrostiniPortForwardingAllowed', + 'NativeMessagingUserLevelHosts', + 'NetworkFileSharesAllowed', + 'NetBiosShareDiscoveryEnabled', + 'NTLMShareAuthenticationEnabled', + 'DeviceDataRoamingEnabled', + 'DeviceWiFiFastTransitionEnabled', + 'DeviceWiFiAllowed', + 'DeviceAllowBluetooth', + 'DeviceAllowRedeemChromeOsRegistrationOffers', + 'DeviceQuirksDownloadEnabled', + 'SuggestedContentEnabled', + 'DeviceShowLowDiskSpaceNotification', + 'PasswordManagerEnabled', + 'PasswordLeakDetectionEnabled', + 'PluginVmAllowed', + 'PluginVmDataCollectionAllowed', + 'UserPluginVmAllowed', + 'DeviceRebootOnShutdown', + 'PowerManagementUsesAudioActivity', + 'PowerManagementUsesVideoActivity', + 'AllowWakeLocks', + 'AllowScreenWakeLocks', + 'WaitForInitialUserActivity', + 'PowerSmartDimEnabled', + 'DevicePowerPeakShiftEnabled', + 'DeviceBootOnAcEnabled', + 'DeviceAdvancedBatteryChargeModeEnabled', + 'DeviceUsbPowerShareEnabled', + 'PrintingEnabled', + 'CloudPrintProxyEnabled', + 'PrintingSendUsernameAndFilenameEnabled', + 'CloudPrintSubmitEnabled', + 'DisablePrintPreview', + 'PrintHeaderFooter', + 'PrintPreviewUseSystemDefaultPrinter', + 'UserNativePrintersAllowed', + 'UserPrintersAllowed', + 'DeletePrintJobHistoryAllowed', + 'DeviceLoginScreenPrivacyScreenEnabled', + 'PrivacyScreenEnabled', + 'PinUnlockWeakPinsAllowed', + 'PinUnlockAutosubmitEnabled', + 'RemoteAccessHostFirewallTraversal', + 'RemoteAccessHostRequireCurtain', + 'RemoteAccessHostAllowClientPairing', + 'RemoteAccessHostAllowRelayedConnection', + 'RemoteAccessHostAllowUiAccessForRemoteAssistance', + 'RemoteAccessHostAllowFileTransfer', + 'RemoteAccessHostAllowRemoteAccessConnections', + 'AttestationEnabledForUser', + 'SafeBrowsingEnabled', + 'SafeBrowsingExtendedReportingEnabled', + 'DeviceGuestModeEnabled', + 'DeviceAllowNewUsers', + 'DeviceShowUserNamesOnSignin', + 'DeviceEphemeralUsersEnabled', + 'DeviceShowNumericKeyboardForPassword', + 'DeviceFamilyLinkAccountsAllowed', + 'ShowHomeButton', + 'HomepageIsNewTabPage', + 'DeviceMetricsReportingEnabled', + 'DeviceWilcoDtcAllowed', + 'AbusiveExperienceInterventionEnforce', + 'AccessibilityImageLabelsEnabled', + 'AdditionalDnsQueryTypesEnabled', + 'AdvancedProtectionAllowed', + 'AllowDeletingBrowserHistory', + 'AllowDinosaurEasterEgg', + 'AllowFileSelectionDialogs', + 'AllowScreenLock', + 'AllowSyncXHRInPageDismissal', + 'AlternateErrorPagesEnabled', + 'AlwaysOpenPdfExternally', + 'AppCacheForceEnabled', + 'AudioCaptureAllowed', + 'AudioOutputAllowed', + 'AudioProcessHighPriorityEnabled', + 'AudioSandboxEnabled', + 'AutoFillEnabled', + 'AutofillAddressEnabled', + 'AutofillCreditCardEnabled', + 'AutoplayAllowed', + 'BackgroundModeEnabled', + 'BlockThirdPartyCookies', + 'BookmarkBarEnabled', + 'BrowserAddPersonEnabled', + 'BrowserGuestModeEnabled', + 'BrowserGuestModeEnforced', + 'BrowserLabsEnabled', + 'BrowserNetworkTimeQueriesEnabled', + 'BuiltInDnsClientEnabled', + 'CECPQ2Enabled', + 'CaptivePortalAuthenticationIgnoresProxy', + 'ChromeCleanupEnabled', + 'ChromeCleanupReportingEnabled', + 'ChromeOsLockOnIdleSuspend', + 'ClickToCallEnabled', + 'CloudManagementEnrollmentMandatory', + 'CloudPolicyOverridesPlatformPolicy', + 'CloudUserPolicyMerge', + 'CommandLineFlagSecurityWarningsEnabled', + 'ComponentUpdatesEnabled', + 'DNSInterceptionChecksEnabled', + 
'DataLeakPreventionReportingEnabled', + 'DefaultBrowserSettingEnabled', + 'DefaultSearchProviderContextMenuAccessAllowed', + 'DeveloperToolsDisabled', + 'DeviceAllowMGSToStoreDisplayProperties', + 'DeviceDebugPacketCaptureAllowed', + 'DeviceLocalAccountManagedSessionEnabled', + 'DeviceLoginScreenPrimaryMouseButtonSwitch', + 'DevicePciPeripheralDataAccessEnabled', + 'DevicePowerwashAllowed', + 'DeviceSystemWideTracingEnabled', + 'Disable3DAPIs', + 'DisableSafeBrowsingProceedAnyway', + 'DisableScreenshots', + 'EasyUnlockAllowed', + 'EditBookmarksEnabled', + 'EmojiSuggestionEnabled', + 'EnableDeprecatedPrivetPrinting', + 'EnableOnlineRevocationChecks', + 'EnableSyncConsent', + 'EnterpriseHardwarePlatformAPIEnabled', + 'ExternalProtocolDialogShowAlwaysOpenCheckbox', + 'ExternalStorageDisabled', + 'ExternalStorageReadOnly', + 'ForceBrowserSignin', + 'ForceEphemeralProfiles', + 'ForceGoogleSafeSearch', + 'ForceMaximizeOnFirstRun', + 'ForceSafeSearch', + 'ForceYouTubeSafetyMode', + 'FullscreenAlertEnabled', + 'FullscreenAllowed', + 'GloballyScopeHTTPAuthCacheEnabled', + 'HardwareAccelerationModeEnabled', + 'HideWebStoreIcon', + 'ImportAutofillFormData', + 'ImportBookmarks', + 'ImportHistory', + 'ImportHomepage', + 'ImportSavedPasswords', + 'ImportSearchEngine', + 'IncognitoEnabled', + 'InsecureFormsWarningsEnabled', + 'InsecurePrivateNetworkRequestsAllowed', + 'InstantTetheringAllowed', + 'IntensiveWakeUpThrottlingEnabled', + 'JavascriptEnabled', + 'LacrosAllowed', + 'LacrosSecondaryProfilesAllowed', + 'LockScreenMediaPlaybackEnabled', + 'LoginDisplayPasswordButtonEnabled', + 'ManagedGuestSessionPrivacyWarningsEnabled', + 'MediaRecommendationsEnabled', + 'MediaRouterCastAllowAllIPs', + 'MetricsReportingEnabled', + 'NTPCardsVisible', + 'NTPCustomBackgroundEnabled', + 'NativeWindowOcclusionEnabled', + 'NearbyShareAllowed', + 'PaymentMethodQueryEnabled', + 'PdfAnnotationsEnabled', + 'PhoneHubAllowed', + 'PhoneHubNotificationsAllowed', + 'PhoneHubTaskContinuationAllowed', + 'PolicyAtomicGroupsEnabled', + 'PrimaryMouseButtonSwitch', + 'PromotionalTabsEnabled', + 'PromptForDownloadLocation', + 'QuicAllowed', + 'RendererCodeIntegrityEnabled', + 'RequireOnlineRevocationChecksForLocalAnchors', + 'RoamingProfileSupportEnabled', + 'SSLErrorOverrideAllowed', + 'SafeBrowsingForTrustedSourcesEnabled', + 'SavingBrowserHistoryDisabled', + 'ScreenCaptureAllowed', + 'ScrollToTextFragmentEnabled', + 'SearchSuggestEnabled', + 'SecondaryGoogleAccountSigninAllowed', + 'SharedArrayBufferUnrestrictedAccessAllowed', + 'SharedClipboardEnabled', + 'ShowAppsShortcutInBookmarkBar', + 'ShowFullUrlsInAddressBar', + 'ShowLogoutButtonInTray', + 'SignedHTTPExchangeEnabled', + 'SigninAllowed', + 'SigninInterceptionEnabled', + 'SitePerProcess', + 'SmartLockSigninAllowed', + 'SmsMessagesAllowed', + 'SpellCheckServiceEnabled', + 'SpellcheckEnabled', + 'StartupBrowserWindowLaunchSuppressed', + 'StricterMixedContentTreatmentEnabled', + 'SuggestLogoutAfterClosingLastWindow', + 'SuppressDifferentOriginSubframeDialogs', + 'SuppressUnsupportedOSWarning', + 'SyncDisabled', + 'TargetBlankImpliesNoOpener', + 'TaskManagerEndProcessEnabled', + 'ThirdPartyBlockingEnabled', + 'TouchVirtualKeyboardEnabled', + 'TranslateEnabled', + 'TripleDESEnabled', + 'UnifiedDesktopEnabledByDefault', + 'UrlKeyedAnonymizedDataCollectionEnabled', + 'UserAgentClientHintsEnabled', + 'UserFeedbackAllowed', + 'VideoCaptureAllowed', + 'VmManagementCliAllowed', + 'VpnConfigAllowed', + 'WPADQuickCheckEnabled', + 'WebRtcAllowLegacyTLSProtocols', + 
'WebRtcEventLogCollectionAllowed', + 'WifiSyncAndroidAllowed', + 'WindowOcclusionEnabled'] + if name in dict_entries: + return json.loads(get_string(e.data)) + elif e.type == misc.REG_DWORD and name in bools: + return e.data == 1 + return e.data + +def assign_entry(policies, e): + if e.valuename.isnumeric(): + name = e.keyname.split('\\')[-1] + if name not in policies: + policies[name] = [] + policies[name].append(parse_entry_data(name, e)) + else: + name = e.valuename + policies[name] = parse_entry_data(name, e) + +def convert_pol_to_json(section, entries): + managed = {} + recommended = {} + recommended_section = '\\'.join([section, 'Recommended']) + for e in entries: + if '**delvals.' in e.valuename: + continue + if e.keyname.startswith(recommended_section): + assign_entry(recommended, e) + elif e.keyname.startswith(section): + assign_entry(managed, e) + return managed, recommended + +class gp_chromium_ext(gp_pol_ext, gp_file_applier): + managed_policies_path = '/etc/chromium/policies/managed' + recommended_policies_path = '/etc/chromium/policies/recommended' + + def __str__(self): + return 'Google/Chromium' + + def process_group_policy(self, deleted_gpo_list, changed_gpo_list, + policy_dir=None): + if policy_dir is not None: + self.recommended_policies_path = os.path.join(policy_dir, + 'recommended') + self.managed_policies_path = os.path.join(policy_dir, 'managed') + # Create the policy directories if necessary + if not os.path.exists(self.recommended_policies_path): + os.makedirs(self.recommended_policies_path, mode=0o755, + exist_ok=True) + if not os.path.exists(self.managed_policies_path): + os.makedirs(self.managed_policies_path, mode=0o755, + exist_ok=True) + for guid, settings in deleted_gpo_list: + if str(self) in settings: + for attribute, policies in settings[str(self)].items(): + try: + json.loads(policies) + except json.decoder.JSONDecodeError: + self.unapply(guid, attribute, policies) + else: + # Policies were previously stored all in one file, but + # the Chromium documentation says this is not + # necessary. Unapply the old policy file if json was + # stored in the cache (now we store a hash and file + # names instead). 
+ if attribute == 'recommended': + fname = os.path.join(self.recommended_policies_path, + 'policies.json') + elif attribute == 'managed': + fname = os.path.join(self.managed_policies_path, + 'policies.json') + self.unapply(guid, attribute, fname) + + for gpo in changed_gpo_list: + if gpo.file_sys_path: + section = 'Software\\Policies\\Google\\Chrome' + pol_file = 'MACHINE/Registry.pol' + path = os.path.join(gpo.file_sys_path, pol_file) + pol_conf = self.parse(path) + if not pol_conf: + continue + + managed, recommended = convert_pol_to_json(section, + pol_conf.entries) + def applier_func(policies, location): + try: + with NamedTemporaryFile(mode='w+', prefix='gp_', + delete=False, + dir=location, + suffix='.json') as f: + json.dump(policies, f) + os.chmod(f.name, 0o644) + log.debug('Wrote Chromium preferences', policies) + return [f.name] + except PermissionError: + log.debug('Failed to write Chromium preferences', + policies) + value_hash = self.generate_value_hash(json.dumps(managed)) + self.apply(gpo.name, 'managed', value_hash, applier_func, + managed, self.managed_policies_path) + value_hash = self.generate_value_hash(json.dumps(recommended)) + self.apply(gpo.name, 'recommended', value_hash, applier_func, + recommended, self.recommended_policies_path) + + def rsop(self, gpo): + output = {} + pol_file = 'MACHINE/Registry.pol' + section = 'Software\\Policies\\Google\\Chrome' + if gpo.file_sys_path: + path = os.path.join(gpo.file_sys_path, pol_file) + pol_conf = self.parse(path) + if not pol_conf: + return output + for e in pol_conf.entries: + if e.keyname.startswith(section): + output['%s\\%s' % (e.keyname, e.valuename)] = e.data + return output + +class gp_chrome_ext(gp_chromium_ext): + managed_policies_path = '/etc/opt/chrome/policies/managed' + recommended_policies_path = '/etc/opt/chrome/policies/recommended' + + def __str__(self): + return 'Google/Chrome' diff --git a/python/samba/gp/gp_drive_maps_ext.py b/python/samba/gp/gp_drive_maps_ext.py new file mode 100644 index 0000000..f998d0e --- /dev/null +++ b/python/samba/gp/gp_drive_maps_ext.py @@ -0,0 +1,168 @@ +# gp_drive_maps_user_ext samba gpo policy +# Copyright (C) David Mulder 2020 +# +# This program is free software; you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation; either version 3 of the License, or +# (at your option) any later version. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with this program. If not, see . + +import os +import json +from samba.gp.gpclass import gp_xml_ext, gp_misc_applier, drop_privileges, \ + expand_pref_variables +from subprocess import Popen, PIPE +from samba.gp.gp_scripts_ext import fetch_crontab, install_user_crontab +from samba.gp.util.logging import log +from samba.gp import gp_scripts_ext +gp_scripts_ext.intro = ''' +### autogenerated by samba +# +# This file is generated by the gp_drive_maps_user_ext Group Policy +# Client Side Extension. To modify the contents of this file, +# modify the appropriate Group Policy objects which apply +# to this machine. DO NOT MODIFY THIS FILE DIRECTLY. 
+# + +''' + +def mount_drive(uri): + log.debug('Mounting drive', uri) + out, err = Popen(['gio', 'mount', uri], + stdout=PIPE, stderr=PIPE).communicate() + if err: + if b'Location is already mounted' not in err: + raise SystemError(err) + +def unmount_drive(uri): + log.debug('Unmounting drive', uri) + return Popen(['gio', 'mount', uri, '--unmount']).wait() + +class gp_drive_maps_user_ext(gp_xml_ext, gp_misc_applier): + def parse_value(self, val): + vals = super().parse_value(val) + if 'props' in vals.keys(): + vals['props'] = json.loads(vals['props']) + if 'run_once' in vals.keys(): + vals['run_once'] = json.loads(vals['run_once']) + return vals + + def unapply(self, guid, uri, val): + vals = self.parse_value(val) + if 'props' in vals.keys() and \ + vals['props']['action'] in ['C', 'R', 'U']: + unmount_drive(uri) + others, entries = fetch_crontab(self.username) + if 'crontab' in vals.keys() and vals['crontab'] in entries: + entries.remove(vals['crontab']) + install_user_crontab(self.username, others, entries) + self.cache_remove_attribute(guid, uri) + + def apply(self, guid, uri, props, run_once, entry): + old_val = self.cache_get_attribute_value(guid, uri) + val = self.generate_value(props=json.dumps(props), + run_once=json.dumps(run_once), + crontab=entry) + + # The policy has changed, unapply it first + if old_val: + self.unapply(guid, uri, old_val) + + if props['action'] in ['C', 'R', 'U']: + mount_drive(uri) + elif props['action'] == 'D': + unmount_drive(uri) + if not run_once: + others, entries = fetch_crontab(self.username) + if entry not in entries: + entries.append(entry) + install_user_crontab(self.username, others, entries) + self.cache_add_attribute(guid, uri, val) + + def __str__(self): + return 'Preferences/Drives' + + def process_group_policy(self, deleted_gpo_list, changed_gpo_list): + for guid, settings in deleted_gpo_list: + if str(self) in settings: + for uri, val in settings[str(self)].items(): + self.unapply(guid, uri, val) + + for gpo in changed_gpo_list: + if gpo.file_sys_path: + xml = 'USER/Preferences/Drives/Drives.xml' + path = os.path.join(gpo.file_sys_path, xml) + xml_conf = drop_privileges('root', self.parse, path) + if not xml_conf: + continue + drives = xml_conf.findall('Drive') + attrs = [] + for drive in drives: + prop = drive.find('Properties') + if prop is None: + log.warning('Drive is missing Properties', drive.attrib) + continue + if prop.attrib['thisDrive'] == 'HIDE': + log.warning('Drive is hidden', prop.attrib) + continue # Don't mount a hidden drive + run_once = False + filters = drive.find('Filters') + if filters: + run_once_filter = filters.find('FilterRunOnce') + if run_once_filter is not None: + run_once = True + uri = 'smb:{}'.format(prop.attrib['path'].replace('\\', '/')) + # Ensure we expand the preference variables, or fail if we + # are unable to (the uri is invalid if we fail). + gptpath = os.path.join(gpo.file_sys_path, 'USER') + try: + uri = expand_pref_variables(uri, gptpath, self.lp, + username=self.username) + except NameError as e: + # If we fail expanding variables, then the URI is + # invalid and we can't continue processing this drive + # map. We can continue processing other drives, as they + # may succeed. This is not a critical error, since some + # Windows specific policies won't apply here. 
+ log.warn('Failed to expand drive map variables: %s' % e, + prop.attrib) + continue + attrs.append(uri) + entry = '' + if not run_once: + if prop.attrib['action'] in ['C', 'R', 'U']: + entry = '@hourly gio mount {}'.format(uri) + elif prop.attrib['action'] == 'D': + entry = '@hourly gio mount {} --unmount'.format(uri) + self.apply(gpo.name, uri, prop.attrib, run_once, entry) + self.clean(gpo.name, keep=attrs) + + def rsop(self, gpo): + output = {} + if gpo.file_sys_path: + xml = 'USER/Preferences/Drives/Drives.xml' + path = os.path.join(gpo.file_sys_path, xml) + xml_conf = self.parse(path) + if not xml_conf: + return output + drives = xml_conf.findall('Drive') + for drive in drives: + prop = drive.find('Properties') + if prop is None: + continue + if prop.attrib['thisDrive'] == 'HIDE': + continue + uri = 'smb:{}'.format(prop.attrib['path'].replace('\\', '/')) + if prop.attrib['action'] in ['C', 'R', 'U']: + output[prop.attrib['label']] = 'gio mount {}'.format(uri) + elif prop.attrib['action'] == 'D': + output[prop.attrib['label']] = \ + 'gio mount {} --unmount'.format(uri) + return output diff --git a/python/samba/gp/gp_ext_loader.py b/python/samba/gp/gp_ext_loader.py new file mode 100644 index 0000000..705b973 --- /dev/null +++ b/python/samba/gp/gp_ext_loader.py @@ -0,0 +1,59 @@ +# Group Policy Client Side Extension Loader +# Copyright (C) David Mulder 2018 +# +# This program is free software; you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation; either version 3 of the License, or +# (at your option) any later version. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with this program. If not, see . 
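The loader defined below imports each registered client-side extension module from its DllName path and buckets the discovered gp_ext subclasses into machine and user lists. A hedged usage sketch; the smb.conf path here is an assumption for the example, not taken from the patch:

    # Illustrative only: enumerate the CSEs a host has registered.
    from samba.gp.gp_ext_loader import get_gp_client_side_extensions

    machine_exts, user_exts = get_gp_client_side_extensions('/etc/samba/smb.conf')
    for ext in machine_exts:
        print('machine CSE:', ext.__name__)
    for ext in user_exts:
        print('user CSE:', ext.__name__)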
+ +from samba.gp.gpclass import list_gp_extensions +from samba.gp.gpclass import gp_ext +from samba.gp.util.logging import log + +try: + import importlib.util + + def import_file(name, location): + spec = importlib.util.spec_from_file_location(name, location) + module = importlib.util.module_from_spec(spec) + spec.loader.exec_module(module) + return module +except ImportError: + import imp + + def import_file(name, location): + return imp.load_source(name, location) + + +def get_gp_ext_from_module(name, mod): + if mod: + for k, v in vars(mod).items(): + if k == name and issubclass(v, gp_ext): + return v + return None + + +def get_gp_client_side_extensions(smb_conf): + user_exts = [] + machine_exts = [] + gp_exts = list_gp_extensions(smb_conf) + for gp_extension in gp_exts.values(): + module = import_file(gp_extension['ProcessGroupPolicy'], gp_extension['DllName']) + ext = get_gp_ext_from_module(gp_extension['ProcessGroupPolicy'], module) + if ext and gp_extension['MachinePolicy']: + machine_exts.append(ext) + log.info('Loaded machine extension from %s: %s' + % (gp_extension['DllName'], ext.__name__)) + if ext and gp_extension['UserPolicy']: + user_exts.append(ext) + log.info('Loaded user extension from %s: %s' + % (gp_extension['DllName'], ext.__name__)) + return (machine_exts, user_exts) diff --git a/python/samba/gp/gp_firefox_ext.py b/python/samba/gp/gp_firefox_ext.py new file mode 100644 index 0000000..a623314 --- /dev/null +++ b/python/samba/gp/gp_firefox_ext.py @@ -0,0 +1,219 @@ +# gp_firefox_ext samba gpo policy +# Copyright (C) David Mulder 2021 +# +# This program is free software; you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation; either version 3 of the License, or +# (at your option) any later version. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with this program. If not, see . + +import os +import json +from samba.gp.gpclass import gp_pol_ext, gp_misc_applier +from samba.dcerpc import misc +from samba.common import get_string +from samba.gp.util.logging import log + +def parse_entry_data(e): + if e.type == misc.REG_MULTI_SZ: + data = get_string(e.data).replace('\x00', '') + return json.loads(data) + elif e.type == misc.REG_DWORD and e.data in [0, 1]: + return e.data == 1 + return e.data + +def convert_pol_to_json(section, entries): + result = {} + index_map = {} + for e in entries: + if not e.keyname.startswith(section): + continue + if '**delvals.' 
in e.valuename: + continue + sub_keys = e.keyname.replace(section, '').strip('\\') + if sub_keys: + sub_keys = sub_keys.split('\\') + current = result + index = -1 + if sub_keys[-1].isnumeric(): + name = '\\'.join(sub_keys[:-1]) + elif e.valuename.isnumeric(): + name = e.keyname + else: + name = '\\'.join([e.keyname, e.valuename]) + for i in range(len(sub_keys)): + if sub_keys[i] == 'PDFjs': + sub_keys[i] = 'PSFjs' + ctype = dict + if i == len(sub_keys)-1 and e.valuename.isnumeric(): + ctype = list + index = int(e.valuename) + if i < len(sub_keys)-1 and sub_keys[i+1].isnumeric(): + ctype = list + index = int(sub_keys[i+1]) + if type(current) == dict: + if sub_keys[i] not in current: + if ctype == dict: + current[sub_keys[i]] = {} + else: + current[sub_keys[i]] = [] + current = current[sub_keys[i]] + else: + if name not in index_map: + index_map[name] = {} + if index not in index_map[name].keys(): + if ctype == dict: + current.append({}) + else: + current.append([]) + index_map[name][index] = len(current)-1 + current = current[index_map[name][index]] + if type(current) == list: + current.append(parse_entry_data(e)) + else: + current[e.valuename] = parse_entry_data(e) + else: + result[e.valuename] = parse_entry_data(e) + return result + +class gp_firefox_ext(gp_pol_ext, gp_misc_applier): + firefox_installdir = '/etc/firefox/policies' + destfile = os.path.join(firefox_installdir, 'policies.json') + + def __str__(self): + return 'Mozilla/Firefox' + + def set_machine_policy(self, policies): + try: + os.makedirs(self.firefox_installdir, exist_ok=True) + with open(self.destfile, 'w') as f: + json.dump(policies, f) + log.debug('Wrote Firefox preferences', self.destfile) + except PermissionError: + log.debug('Failed to write Firefox preferences', + self.destfile) + + def get_machine_policy(self): + if os.path.exists(self.destfile): + with open(self.destfile, 'r') as r: + policies = json.load(r) + log.debug('Read Firefox preferences', self.destfile) + else: + policies = {'policies': {}} + return policies + + def parse_value(self, value): + data = super().parse_value(value) + for k, v in data.items(): + try: + data[k] = json.loads(v) + except json.decoder.JSONDecodeError: + pass + return data + + def unapply_policy(self, guid, policy, applied_val, val): + def set_val(policies, policy, val): + if val is None: + del policies[policy] + else: + policies[policy] = val + current = self.get_machine_policy() + if policy in current['policies'].keys(): + if applied_val is not None: + # Only restore policy if unmodified + if current['policies'][policy] == applied_val: + set_val(current['policies'], policy, val) + else: + set_val(current['policies'], policy, val) + self.set_machine_policy(current) + + def unapply(self, guid, policy, val): + cache = self.parse_value(val) + if policy == 'policies.json': + current = self.get_machine_policy() + for attr in current['policies'].keys(): + val = cache['old_val']['policies'][attr] \ + if attr in cache['old_val']['policies'] else None + self.unapply_policy(guid, attr, None, val) + else: + self.unapply_policy(guid, policy, + cache['new_val'] if 'new_val' in cache else None, + cache['old_val']) + self.cache_remove_attribute(guid, policy) + + def apply(self, guid, policy, val): + # If the policy has changed, unapply, then apply new policy + data = self.cache_get_attribute_value(guid, policy) + if data is not None: + self.unapply(guid, policy, data) + + current = self.get_machine_policy() + before = None + if policy in current['policies'].keys(): + before = 
current['policies'][policy] + + # Apply the policy and log the changes + new_value = self.generate_value(old_val=json.dumps(before), + new_val=json.dumps(val)) + current['policies'][policy] = val + self.set_machine_policy(current) + self.cache_add_attribute(guid, policy, get_string(new_value)) + + def process_group_policy(self, deleted_gpo_list, changed_gpo_list, + policy_dir=None): + if policy_dir is not None: + self.firefox_installdir = policy_dir + self.destfile = os.path.join(policy_dir, 'policies.json') + for guid, settings in deleted_gpo_list: + if str(self) in settings: + for policy, val in settings[str(self)].items(): + self.unapply(guid, policy, val) + + for gpo in changed_gpo_list: + if gpo.file_sys_path: + pol_file = 'MACHINE/Registry.pol' + section = 'Software\\Policies\\Mozilla\\Firefox' + path = os.path.join(gpo.file_sys_path, pol_file) + pol_conf = self.parse(path) + if not pol_conf: + continue + + # Unapply the old cache entry, if present + data = self.cache_get_attribute_value(gpo.name, 'policies.json') + if data is not None: + self.unapply(gpo.name, 'policies.json', data) + + policies = convert_pol_to_json(section, pol_conf.entries) + for policy, val in policies.items(): + self.apply(gpo.name, policy, val) + + # cleanup removed policies + self.clean(gpo.name, keep=policies.keys()) + + def rsop(self, gpo): + output = {} + pol_file = 'MACHINE/Registry.pol' + section = 'Software\\Policies\\Mozilla\\Firefox' + if gpo.file_sys_path: + path = os.path.join(gpo.file_sys_path, pol_file) + pol_conf = self.parse(path) + if not pol_conf: + return output + for e in pol_conf.entries: + if e.keyname.startswith(section): + output['%s\\%s' % (e.keyname, e.valuename)] = e.data + return output + +class gp_firefox_old_ext(gp_firefox_ext): + firefox_installdir = '/usr/lib64/firefox/distribution' + destfile = os.path.join(firefox_installdir, 'policies.json') + + def __str__(self): + return 'Mozilla/Firefox (old profile directory)' diff --git a/python/samba/gp/gp_firewalld_ext.py b/python/samba/gp/gp_firewalld_ext.py new file mode 100644 index 0000000..5e125b0 --- /dev/null +++ b/python/samba/gp/gp_firewalld_ext.py @@ -0,0 +1,171 @@ +# gp_firewalld_ext samba gpo policy +# Copyright (C) David Mulder 2021 +# +# This program is free software; you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation; either version 3 of the License, or +# (at your option) any later version. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with this program. If not, see . 
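The gp_firewalld_ext module that follows renders zone and rich-rule policy data into firewall-cmd invocations. Tracing rule_segment_parse() and apply_rules() below, an illustrative policy value (zone name and address invented for the example) would be handled as shown:

    # Illustrative only: one zone ('work') with a single rich rule.
    rules = {'work': [{'rule': {'family': 'ipv4'},
                       'source': {'address': '172.25.1.7'},
                       'service': {'name': 'ftp'},
                       'accept': {}}]}
    # apply_rules() would flatten this and run roughly:
    #   firewall-cmd --permanent --zone=work --add-rich-rule \
    #       'rule family=ipv4 source address=172.25.1.7 service name=ftp accept'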
+ +import os +from subprocess import Popen, PIPE +from shutil import which +import json +from samba.gp.gpclass import gp_pol_ext, gp_applier +from samba.gp.util.logging import log + +def firewall_cmd(*args): + fw_cmd = which('firewall-cmd') + if fw_cmd is not None: + cmd = [fw_cmd] + cmd.extend(list(args)) + + p = Popen(cmd, stdout=PIPE, stderr=PIPE) + stdoutdata, _ = p.communicate() + return p.returncode, stdoutdata + else: + return -1, 'firewall-cmd not found' + +def rule_segment_parse(name, rule_segment): + if isinstance(rule_segment, str): + return ('%s=%s' % (name, rule_segment)) + ' ' + else: + return '%s %s ' % (name, + ' '.join(['%s=%s' % (k, v) for k, v in rule_segment.items()])) + +class gp_firewalld_ext(gp_pol_ext, gp_applier): + def __str__(self): + return 'Security/Firewalld' + + def apply_zone(self, guid, zone): + zone_attrs = [] + ret = firewall_cmd('--permanent', '--new-zone=%s' % zone)[0] + if ret != 0: + log.error('Failed to add new zone', zone) + else: + attribute = 'zone:%s' % zone + self.cache_add_attribute(guid, attribute, zone) + zone_attrs.append(attribute) + # Default to matching the interface(s) for the default zone + ret, out = firewall_cmd('--list-interfaces') + if ret != 0: + log.error('Failed to set interfaces for zone', zone) + for interface in out.strip().split(): + ret = firewall_cmd('--permanent', '--zone=%s' % zone, + '--add-interface=%s' % interface.decode()) + if ret != 0: + log.error('Failed to set interfaces for zone', zone) + return zone_attrs + + def apply_rules(self, guid, rule_dict): + rule_attrs = [] + for zone, rules in rule_dict.items(): + for rule in rules: + if 'rule' in rule: + rule_parsed = rule_segment_parse('rule', rule['rule']) + else: + rule_parsed = 'rule ' + for segment in ['source', 'destination', 'service', 'port', + 'protocol', 'icmp-block', 'masquerade', + 'icmp-type', 'forward-port', 'source-port', + 'log', 'audit']: + names = [s for s in rule.keys() if s.startswith(segment)] + for name in names: + rule_parsed += rule_segment_parse(name, rule[name]) + actions = set(['accept', 'reject', 'drop', 'mark']) + segments = set(rule.keys()) + action = actions.intersection(segments) + if len(action) == 1: + rule_parsed += rule_segment_parse(list(action)[0], + rule[list(action)[0]]) + else: + log.error('Invalid firewall rule syntax') + ret = firewall_cmd('--permanent', '--zone=%s' % zone, + '--add-rich-rule', rule_parsed.strip())[0] + if ret != 0: + log.error('Failed to add firewall rule', rule_parsed) + else: + rhash = self.generate_value_hash(rule_parsed) + attribute = 'rule:%s:%s' % (zone, rhash) + self.cache_add_attribute(guid, attribute, rule_parsed) + rule_attrs.append(attribute) + return rule_attrs + + def unapply(self, guid, attribute, value): + if attribute.startswith('zone'): + ret = firewall_cmd('--permanent', + '--delete-zone=%s' % value)[0] + if ret != 0: + log.error('Failed to remove zone', value) + else: + self.cache_remove_attribute(guid, attribute) + elif attribute.startswith('rule'): + _, zone, _ = attribute.split(':') + ret = firewall_cmd('--permanent', '--zone=%s' % zone, + '--remove-rich-rule', value)[0] + if ret != 0: + log.error('Failed to remove firewall rule', value) + else: + self.cache_remove_attribute(guid, attribute) + + def apply(self, applier_func, *args): + return applier_func(*args) + + def process_group_policy(self, deleted_gpo_list, changed_gpo_list): + for guid, settings in deleted_gpo_list: + if str(self) in settings: + for attribute, value in settings[str(self)].items(): + self.unapply(guid, attribute, 
value) + + for gpo in changed_gpo_list: + if gpo.file_sys_path: + section = 'Software\\Policies\\Samba\\Unix Settings\\Firewalld' + pol_file = 'MACHINE/Registry.pol' + path = os.path.join(gpo.file_sys_path, pol_file) + pol_conf = self.parse(path) + if not pol_conf: + continue + attrs = [] + for e in pol_conf.entries: + if e.keyname.startswith(section): + if e.keyname.endswith('Rules'): + attrs.extend(self.apply(self.apply_rules, gpo.name, + json.loads(e.data))) + elif e.keyname.endswith('Zones'): + if e.valuename == '**delvals.': + continue + attrs.extend(self.apply(self.apply_zone, gpo.name, + e.data)) + + # Cleanup all old zones and rules from this GPO + self.clean(gpo.name, keep=attrs) + + def rsop(self, gpo): + output = {} + pol_file = 'MACHINE/Registry.pol' + section = 'Software\\Policies\\Samba\\Unix Settings\\Firewalld' + if gpo.file_sys_path: + path = os.path.join(gpo.file_sys_path, pol_file) + pol_conf = self.parse(path) + if not pol_conf: + return output + for e in pol_conf.entries: + if e.keyname.startswith(section): + if e.keyname.endswith('Zones'): + if e.valuename == '**delvals.': + continue + if 'Zones' not in output.keys(): + output['Zones'] = [] + output['Zones'].append(e.data) + elif e.keyname.endswith('Rules'): + if 'Rules' not in output.keys(): + output['Rules'] = [] + output['Rules'].append(json.loads(e.data)) + return output diff --git a/python/samba/gp/gp_gnome_settings_ext.py b/python/samba/gp/gp_gnome_settings_ext.py new file mode 100644 index 0000000..567ab94 --- /dev/null +++ b/python/samba/gp/gp_gnome_settings_ext.py @@ -0,0 +1,418 @@ +# gp_gnome_settings_ext samba gpo policy +# Copyright (C) David Mulder 2020 +# +# This program is free software; you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation; either version 3 of the License, or +# (at your option) any later version. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with this program. If not, see . + +import os, re +from samba.gp.gpclass import gp_pol_ext, gp_file_applier +from tempfile import NamedTemporaryFile +import shutil +from configparser import ConfigParser +from subprocess import Popen, PIPE +from samba.common import get_string +from glob import glob +import xml.etree.ElementTree as etree +from samba.gp.util.logging import log + +def dconf_update(test_dir): + if test_dir is not None: + return + dconf = shutil.which('dconf') + if dconf is None: + log.error('Failed to update dconf. 
Command not found') + return + p = Popen([dconf, 'update'], stdout=PIPE, stderr=PIPE) + out, err = p.communicate() + if p.returncode != 0: + log.error('Failed to update dconf', get_string(err)) + +def create_locks_dir(test_dir): + locks_dir = '/etc/dconf/db/local.d/locks' + if test_dir is not None: + locks_dir = os.path.join(test_dir, locks_dir[1:]) + os.makedirs(locks_dir, exist_ok=True) + return locks_dir + +def create_user_profile(test_dir): + user_profile = '/etc/dconf/profile/user' + if test_dir is not None: + user_profile = os.path.join(test_dir, user_profile[1:]) + if os.path.exists(user_profile): + return + os.makedirs(os.path.dirname(user_profile), exist_ok=True) + with NamedTemporaryFile('w', dir=os.path.dirname(user_profile), + delete=False) as w: + w.write('user-db:user\nsystem-db:local') + os.chmod(w.name, 0o644) + fname = w.name + shutil.move(fname, user_profile) + +def create_local_db(test_dir): + local_db = '/etc/dconf/db/local.d' + if test_dir is not None: + local_db = os.path.join(test_dir, local_db[1:]) + os.makedirs(local_db, exist_ok=True) + return local_db + +def select_next_conf(directory, fname=''): + configs = [re.match(r'(\d+)%s' % fname, f) for f in os.listdir(directory)] + return max([int(m.group(1)) for m in configs if m]+[0])+1 + +class gp_gnome_settings_ext(gp_pol_ext, gp_file_applier): + def __init__(self, *args): + super().__init__(*args) + self.keys = ['Compose Key', + 'Dim Screen when User is Idle', + 'Lock Down Specific Settings', + 'Whitelisted Online Accounts', + 'Enabled Extensions'] + self.lock_down_settings = {} + self.test_dir = None + + def __str__(self): + return 'GNOME Settings/Lock Down Settings' + + def __add_lockdown_data(self, k, e): + if k not in self.lock_down_settings: + self.lock_down_settings[k] = {} + self.lock_down_settings[k][e.valuename] = e.data + + def __enable_lockdown_data(self, e): + if e.valuename not in self.lock_down_settings: + self.lock_down_settings[e.valuename] = {} + self.lock_down_settings[e.valuename]['Enabled'] = e.data == 1 + + def __apply_compose_key(self, data): + create_user_profile(self.test_dir) + local_db_dir = create_local_db(self.test_dir) + + conf_id = select_next_conf(local_db_dir, '-input-sources') + local_db = os.path.join(local_db_dir, + '%010d-input-sources' % conf_id) + data_map = { 'Right Alt': 'compose:ralt', + 'Left Win': 'compose:lwin', + '3rd level of Left Win': 'compose:lwin-altgr', + 'Right Win': 'compose:rwin', + '3rd level of Right Win': 'compose:rwin-altgr', + 'Menu': 'compose:menu', + '3rd level of Menu': 'compose:menu-altgr', + 'Left Ctrl': 'compose:lctrl', + '3rd level of Left Ctrl': 'compose:lctrl-altgr', + 'Right Ctrl': 'compose:rctrl', + '3rd level of Right Ctrl': 'compose:rctrl-altgr', + 'Caps Lock': 'compose:caps', + '3rd level of Caps Lock': 'compose:caps-altgr', + 'The "< >" key': 'compose:102', + '3rd level of the "< >" key': 'compose:102-altgr', + 'Pause': 'compose:paus', + 'PrtSc': 'compose:prsc', + 'Scroll Lock': 'compose:sclk' + } + if data['Key Name'] not in data_map.keys(): + log.error('Compose Key not recognized', data) + return + parser = ConfigParser() + section = 'org/gnome/desktop/input-sources' + parser.add_section(section) + parser.set(section, 'xkb-options', + "['%s']" % data_map[data['Key Name']]) + with open(local_db, 'w') as w: + parser.write(w) + + # Lock xkb-options + locks_dir = create_locks_dir(self.test_dir) + conf_id = select_next_conf(locks_dir) + lock = os.path.join(locks_dir, '%010d-input-sources' % conf_id) + with open(lock, 'w') as w: + 
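+            # A note on the format (a sketch, not an upstream comment):
+            # dconf lock files are plain lists of key paths, one per line.
+            # Listing /org/gnome/desktop/input-sources/xkb-options here
+            # makes the compose-key mapping mandatory rather than a
+            # user-overridable default.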
w.write('/org/gnome/desktop/input-sources/xkb-options') + + dconf_update(self.test_dir) + return [local_db, lock] + + def __apply_dim_idle(self, data): + create_user_profile(self.test_dir) + local_db_dir = create_local_db(self.test_dir) + conf_id = select_next_conf(local_db_dir, '-power') + local_power_db = os.path.join(local_db_dir, '%010d-power' % conf_id) + parser = ConfigParser() + section = 'org/gnome/settings-daemon/plugins/power' + parser.add_section(section) + parser.set(section, 'idle-dim', 'true') + parser.set(section, 'idle-brightness', str(data['Dim Idle Brightness'])) + with open(local_power_db, 'w') as w: + parser.write(w) + conf_id = select_next_conf(local_db_dir, '-session') + local_session_db = os.path.join(local_db_dir, '%010d-session' % conf_id) + parser = ConfigParser() + section = 'org/gnome/desktop/session' + parser.add_section(section) + parser.set(section, 'idle-delay', 'uint32 %d' % data['Delay']) + with open(local_session_db, 'w') as w: + parser.write(w) + + # Lock power-saving + locks_dir = create_locks_dir(self.test_dir) + conf_id = select_next_conf(locks_dir) + lock = os.path.join(locks_dir, '%010d-power-saving' % conf_id) + with open(lock, 'w') as w: + w.write('/org/gnome/settings-daemon/plugins/power/idle-dim\n') + w.write('/org/gnome/settings-daemon/plugins/power/idle-brightness\n') + w.write('/org/gnome/desktop/session/idle-delay') + + dconf_update(self.test_dir) + return [local_power_db, local_session_db, lock] + + def __apply_specific_settings(self, data): + create_user_profile(self.test_dir) + locks_dir = create_locks_dir(self.test_dir) + conf_id = select_next_conf(locks_dir, '-group-policy') + policy_file = os.path.join(locks_dir, '%010d-group-policy' % conf_id) + with open(policy_file, 'w') as w: + for key in data.keys(): + w.write('%s\n' % key) + dconf_update(self.test_dir) + return [policy_file] + + def __apply_whitelisted_account(self, data): + create_user_profile(self.test_dir) + local_db_dir = create_local_db(self.test_dir) + locks_dir = create_locks_dir(self.test_dir) + val = "['%s']" % "', '".join(data.keys()) + policy_files = self.__lockdown(local_db_dir, locks_dir, 'goa', + 'whitelisted-providers', val, + 'org/gnome/online-accounts') + dconf_update(self.test_dir) + return policy_files + + def __apply_enabled_extensions(self, data): + create_user_profile(self.test_dir) + local_db_dir = create_local_db(self.test_dir) + conf_id = select_next_conf(local_db_dir) + policy_file = os.path.join(local_db_dir, '%010d-extensions' % conf_id) + parser = ConfigParser() + section = 'org/gnome/shell' + parser.add_section(section) + exts = data.keys() + parser.set(section, 'enabled-extensions', "['%s']" % "', '".join(exts)) + parser.set(section, 'development-tools', 'false') + with open(policy_file, 'w') as w: + parser.write(w) + dconf_update(self.test_dir) + return [policy_file] + + def __lockdown(self, local_db_dir, locks_dir, name, key, val, + section='org/gnome/desktop/lockdown'): + policy_files = [] + conf_id = select_next_conf(local_db_dir) + policy_file = os.path.join(local_db_dir, + '%010d-%s' % (conf_id, name)) + policy_files.append(policy_file) + conf_id = select_next_conf(locks_dir) + lock = os.path.join(locks_dir, '%010d-%s' % (conf_id, name)) + policy_files.append(lock) + parser = ConfigParser() + parser.add_section(section) + parser.set(section, key, val) + with open(policy_file, 'w') as w: + parser.write(w) + with open(lock, 'w') as w: + w.write('/%s/%s' % (section, key)) + return policy_files + + def __apply_enabled(self, k): + policy_files = [] 
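+        # Illustrative note (a sketch, not upstream code): most branches
+        # below delegate to __lockdown(), which pairs a dconf database
+        # fragment with a lock entry. 'Disable Printing', for example,
+        # writes a keyfile under /etc/dconf/db/local.d/ containing
+        #   [org/gnome/desktop/lockdown]
+        #   disable-printing = true
+        # plus the lock '/org/gnome/desktop/lockdown/disable-printing',
+        # so users cannot override the setting once 'dconf update' runs.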
+ + create_user_profile(self.test_dir) + local_db_dir = create_local_db(self.test_dir) + locks_dir = create_locks_dir(self.test_dir) + + if k == 'Lock Down Enabled Extensions': + conf_id = select_next_conf(locks_dir) + policy_file = os.path.join(locks_dir, '%010d-extensions' % conf_id) + policy_files.append(policy_file) + with open(policy_file, 'w') as w: + w.write('/org/gnome/shell/enabled-extensions\n') + w.write('/org/gnome/shell/development-tools') + elif k == 'Disable Printing': + policy_files = self.__lockdown(local_db_dir, locks_dir, 'printing', + 'disable-printing', 'true') + elif k == 'Disable File Saving': + policy_files = self.__lockdown(local_db_dir, locks_dir, + 'filesaving', + 'disable-save-to-disk', 'true') + elif k == 'Disable Command-Line Access': + policy_files = self.__lockdown(local_db_dir, locks_dir, 'cmdline', + 'disable-command-line', 'true') + elif k == 'Disallow Login Using a Fingerprint': + policy_files = self.__lockdown(local_db_dir, locks_dir, + 'fingerprintreader', + 'enable-fingerprint-authentication', + 'false', + section='org/gnome/login-screen') + elif k == 'Disable User Logout': + policy_files = self.__lockdown(local_db_dir, locks_dir, 'logout', + 'disable-log-out', 'true') + elif k == 'Disable User Switching': + policy_files = self.__lockdown(local_db_dir, locks_dir, 'logout', + 'disable-user-switching', 'true') + elif k == 'Disable Repartitioning': + actions = '/usr/share/polkit-1/actions' + udisk2 = glob(os.path.join(actions, + 'org.freedesktop.[u|U][d|D]isks2.policy')) + if len(udisk2) == 1: + udisk2 = udisk2[0] + else: + udisk2 = os.path.join(actions, + 'org.freedesktop.UDisks2.policy') + udisk2_etc = os.path.join('/etc/share/polkit-1/actions', + os.path.basename(udisk2)) + if self.test_dir is not None: + udisk2_etc = os.path.join(self.test_dir, udisk2_etc[1:]) + os.makedirs(os.path.dirname(udisk2_etc), exist_ok=True) + xml_data = etree.ElementTree(etree.Element('policyconfig')) + if os.path.exists(udisk2): + with open(udisk2, 'rb') as f: + data = f.read() + existing_xml = etree.ElementTree(etree.fromstring(data)) + root = xml_data.getroot() + root.append(existing_xml.find('vendor')) + root.append(existing_xml.find('vendor_url')) + root.append(existing_xml.find('icon_name')) + else: + vendor = etree.SubElement(xml_data.getroot(), 'vendor') + vendor.text = 'The Udisks Project' + vendor_url = etree.SubElement(xml_data.getroot(), 'vendor_url') + vendor_url.text = 'https://github.com/storaged-project/udisks' + icon_name = etree.SubElement(xml_data.getroot(), 'icon_name') + icon_name.text = 'drive-removable-media' + action = etree.SubElement(xml_data.getroot(), 'action') + action.attrib['id'] = 'org.freedesktop.udisks2.modify-device' + description = etree.SubElement(action, 'description') + description.text = 'Modify the drive settings' + message = etree.SubElement(action, 'message') + message.text = 'Authentication is required to modify drive settings' + defaults = etree.SubElement(action, 'defaults') + allow_any = etree.SubElement(defaults, 'allow_any') + allow_any.text = 'no' + allow_inactive = etree.SubElement(defaults, 'allow_inactive') + allow_inactive.text = 'no' + allow_active = etree.SubElement(defaults, 'allow_active') + allow_active.text = 'yes' + with open(udisk2_etc, 'wb') as w: + xml_data.write(w, encoding='UTF-8', xml_declaration=True) + policy_files.append(udisk2_etc) + else: + log.error('Unable to apply', k) + return + dconf_update(self.test_dir) + return policy_files + + def __clean_data(self, k): + data = self.lock_down_settings[k] + 
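+        # Strip the 'Enabled' toggle so the appliers receive only the
+        # actual policy payload.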
return {i: data[i] for i in data.keys() if i != 'Enabled'} + + def process_group_policy(self, deleted_gpo_list, changed_gpo_list, + test_dir=None): + if test_dir is not None: + self.test_dir = test_dir + for guid, settings in deleted_gpo_list: + if str(self) in settings: + for attribute, value in settings[str(self)].items(): + self.unapply(guid, attribute, value, sep=';') + dconf_update(test_dir) + + for gpo in changed_gpo_list: + if gpo.file_sys_path: + section_name = 'GNOME Settings\\Lock Down Settings' + pol_file = 'MACHINE/Registry.pol' + path = os.path.join(gpo.file_sys_path, pol_file) + pol_conf = self.parse(path) + if not pol_conf: + continue + for e in pol_conf.entries: + if e.keyname.startswith(section_name) and e.data and \ + '**delvals.' not in e.valuename: + for k in self.keys: + if e.keyname.endswith(k): + self.__add_lockdown_data(k, e) + break + else: + self.__enable_lockdown_data(e) + for k in self.lock_down_settings.keys(): + # Ignore disabled preferences + if not self.lock_down_settings[k]['Enabled']: + # Unapply the disabled preference if previously applied + self.clean(gpo.name, remove=k) + continue + + # Apply using the appropriate applier + data = str(self.lock_down_settings[k]) + value_hash = self.generate_value_hash(data) + if k == self.keys[0]: + self.apply(gpo.name, k, value_hash, + self.__apply_compose_key, + self.__clean_data(k), sep=';') + elif k == self.keys[1]: + self.apply(gpo.name, k, value_hash, + self.__apply_dim_idle, + self.__clean_data(k), sep=';') + elif k == self.keys[2]: + self.apply(gpo.name, k, value_hash, + self.__apply_specific_settings, + self.__clean_data(k), sep=';') + elif k == self.keys[3]: + self.apply(gpo.name, k, value_hash, + self.__apply_whitelisted_account, + self.__clean_data(k), sep=';') + elif k == self.keys[4]: + self.apply(gpo.name, k, value_hash, + self.__apply_enabled_extensions, + self.__clean_data(k), sep=';') + else: + self.apply(gpo.name, k, value_hash, + self.__apply_enabled, + k, sep=';') + + # Unapply any policy that has been removed + self.clean(gpo.name, keep=self.lock_down_settings.keys()) + + def rsop(self, gpo): + output = {} + if gpo.file_sys_path: + section_name = 'GNOME Settings\\Lock Down Settings' + pol_file = 'MACHINE/Registry.pol' + path = os.path.join(gpo.file_sys_path, pol_file) + pol_conf = self.parse(path) + if not pol_conf: + return output + for e in pol_conf.entries: + if e.keyname.startswith(section_name) and e.data and \ + '**delvals.' not in e.valuename: + for k in self.keys: + if e.keyname.endswith(k): + self.__add_lockdown_data(k, e) + break + else: + self.__enable_lockdown_data(e) + for k in self.lock_down_settings.keys(): + if self.lock_down_settings[k]['Enabled']: + if len(self.lock_down_settings[k]) > 1: + data = self.__clean_data(k) + if all([i == data[i] for i in data.keys()]): + output[k] = list(data.keys()) + else: + output[k] = data + else: + output[k] = self.lock_down_settings[k] + return output diff --git a/python/samba/gp/gp_msgs_ext.py b/python/samba/gp/gp_msgs_ext.py new file mode 100644 index 0000000..9aadddf --- /dev/null +++ b/python/samba/gp/gp_msgs_ext.py @@ -0,0 +1,96 @@ +# gp_msgs_ext samba gpo policy +# Copyright (C) David Mulder 2020 +# +# This program is free software; you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation; either version 3 of the License, or +# (at your option) any later version. 
+# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with this program. If not, see . + +import os +from samba.gp.gpclass import gp_pol_ext, gp_misc_applier + +class gp_msgs_ext(gp_pol_ext, gp_misc_applier): + def unapply(self, guid, cdir, attribute, value): + if attribute not in ['motd', 'issue']: + raise ValueError('"%s" is not a message attribute' % attribute) + data = self.parse_value(value) + mfile = os.path.join(cdir, attribute) + if os.path.exists(mfile): + with open(mfile, 'r') as f: + current = f.read() + else: + current = '' + # Only overwrite the msg if it hasn't been modified. It may have been + # modified by another GPO. + if 'new_val' not in data or current.strip() == data['new_val'].strip(): + msg = data['old_val'] + with open(mfile, 'w') as w: + if msg: + w.write(msg) + else: + w.truncate() + self.cache_remove_attribute(guid, attribute) + + def apply(self, guid, cdir, entries): + section_name = 'Software\\Policies\\Samba\\Unix Settings\\Messages' + for e in entries: + if e.keyname == section_name and e.data.strip(): + if e.valuename not in ['motd', 'issue']: + raise ValueError('"%s" is not a message attribute' % + e.valuename) + mfile = os.path.join(cdir, e.valuename) + if os.path.exists(mfile): + with open(mfile, 'r') as f: + old_val = f.read() + else: + old_val = '' + # If policy is already applied, skip application + if old_val.strip() == e.data.strip(): + return + with open(mfile, 'w') as w: + w.write(e.data) + data = self.generate_value(old_val=old_val, new_val=e.data) + self.cache_add_attribute(guid, e.valuename, data) + + def __str__(self): + return 'Unix Settings/Messages' + + def process_group_policy(self, deleted_gpo_list, changed_gpo_list, + cdir='/etc'): + for guid, settings in deleted_gpo_list: + if str(self) in settings: + for attribute, msg in settings[str(self)].items(): + self.unapply(guid, cdir, attribute, msg) + + for gpo in changed_gpo_list: + if gpo.file_sys_path: + section_name = 'Software\\Policies\\Samba\\Unix Settings\\Messages' + pol_file = 'MACHINE/Registry.pol' + path = os.path.join(gpo.file_sys_path, pol_file) + pol_conf = self.parse(path) + if not pol_conf: + continue + self.apply(gpo.name, cdir, pol_conf.entries) + + def rsop(self, gpo): + output = {} + if gpo.file_sys_path: + section_name = 'Software\\Policies\\Samba\\Unix Settings\\Messages' + pol_file = 'MACHINE/Registry.pol' + path = os.path.join(gpo.file_sys_path, pol_file) + pol_conf = self.parse(path) + if not pol_conf: + return output + for e in pol_conf.entries: + if e.keyname == section_name and e.data.strip(): + mfile = os.path.join('/etc', e.valuename) + output[mfile] = e.data + return output diff --git a/python/samba/gp/gp_scripts_ext.py b/python/samba/gp/gp_scripts_ext.py new file mode 100644 index 0000000..998b9cd --- /dev/null +++ b/python/samba/gp/gp_scripts_ext.py @@ -0,0 +1,187 @@ +# gp_scripts_ext samba gpo policy +# Copyright (C) David Mulder 2020 +# +# This program is free software; you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation; either version 3 of the License, or +# (at your option) any later version. 
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program.  If not, see <http://www.gnu.org/licenses/>.
+
+import os, re
+from subprocess import Popen, PIPE
+from samba.gp.gpclass import gp_pol_ext, drop_privileges, gp_file_applier, \
+    gp_misc_applier
+from tempfile import NamedTemporaryFile
+from samba.gp.util.logging import log
+
+intro = '''
+### autogenerated by samba
+#
+# This file is generated by the gp_scripts_ext Group Policy
+# Client Side Extension. To modify the contents of this file,
+# modify the appropriate Group Policy objects which apply
+# to this machine. DO NOT MODIFY THIS FILE DIRECTLY.
+#
+
+'''
+end = '''
+### autogenerated by samba ###
+'''
+
+class gp_scripts_ext(gp_pol_ext, gp_file_applier):
+    def __str__(self):
+        return 'Unix Settings/Scripts'
+
+    def process_group_policy(self, deleted_gpo_list, changed_gpo_list, cdir=None):
+        for guid, settings in deleted_gpo_list:
+            if str(self) in settings:
+                for attribute, script in settings[str(self)].items():
+                    self.unapply(guid, attribute, script)
+
+        for gpo in changed_gpo_list:
+            if gpo.file_sys_path:
+                reg_key = 'Software\\Policies\\Samba\\Unix Settings'
+                sections = { '%s\\Daily Scripts' % reg_key : '/etc/cron.daily',
+                             '%s\\Monthly Scripts' % reg_key : '/etc/cron.monthly',
+                             '%s\\Weekly Scripts' % reg_key : '/etc/cron.weekly',
+                             '%s\\Hourly Scripts' % reg_key : '/etc/cron.hourly' }
+                pol_file = 'MACHINE/Registry.pol'
+                path = os.path.join(gpo.file_sys_path, pol_file)
+                pol_conf = self.parse(path)
+                if not pol_conf:
+                    continue
+                policies = {}
+                for e in pol_conf.entries:
+                    if e.keyname in sections.keys() and e.data.strip():
+                        if e.keyname not in policies:
+                            policies[e.keyname] = []
+                        policies[e.keyname].append(e.data)
+                def applier_func(keyname, entries):
+                    ret = []
+                    cron_dir = sections[keyname] if not cdir else cdir
+                    for data in entries:
+                        with NamedTemporaryFile(prefix='gp_', mode="w+",
+                                                delete=False, dir=cron_dir) as f:
+                            contents = '#!/bin/sh\n%s' % intro
+                            contents += '%s\n' % data
+                            f.write(contents)
+                            os.chmod(f.name, 0o700)
+                            ret.append(f.name)
+                    return ret
+                for keyname, entries in policies.items():
+                    # Each GPO applies only one set of each type of script,
+                    # so the attribute matches the keyname.
+                    attribute = keyname
+                    # The value hash is generated from the script entries,
+                    # ensuring any changes to this GPO will cause the scripts
+                    # to be rewritten.
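+                    # Illustrative sketch (not upstream code):
+                    # generate_value_hash() is roughly equivalent to
+                    #   blake2b(b''.join(get_bytes(e) for e in entries)).hexdigest()
+                    # so adding, editing, or reordering any script entry
+                    # yields a new hash and forces the files to be rewritten.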
+ value_hash = self.generate_value_hash(*entries) + self.apply(gpo.name, attribute, value_hash, applier_func, + keyname, entries) + + # Cleanup any old scripts that are no longer part of the policy + self.clean(gpo.name, keep=policies.keys()) + + def rsop(self, gpo, target='MACHINE'): + output = {} + pol_file = '%s/Registry.pol' % target + if gpo.file_sys_path: + path = os.path.join(gpo.file_sys_path, pol_file) + pol_conf = self.parse(path) + if not pol_conf: + return output + for e in pol_conf.entries: + key = e.keyname.split('\\')[-1] + if key.endswith('Scripts') and e.data.strip(): + if key not in output.keys(): + output[key] = [] + output[key].append(e.data) + return output + +def fetch_crontab(username): + p = Popen(['crontab', '-l', '-u', username], stdout=PIPE, stderr=PIPE) + out, err = p.communicate() + if p.returncode != 0: + log.warning('Failed to read the crontab: %s' % err) + m = re.findall('%s(.*)%s' % (intro, end), out.decode(), re.DOTALL) + if len(m) == 1: + entries = m[0].strip().split('\n') + else: + entries = [] + m = re.findall('(.*)%s.*%s(.*)' % (intro, end), out.decode(), re.DOTALL) + if len(m) == 1: + others = '\n'.join([l.strip() for l in m[0]]) + else: + others = out.decode() + return others, entries + +def install_crontab(fname, username): + p = Popen(['crontab', fname, '-u', username], stdout=PIPE, stderr=PIPE) + _, err = p.communicate() + if p.returncode != 0: + raise RuntimeError('Failed to install crontab: %s' % err) + +def install_user_crontab(username, others, entries): + with NamedTemporaryFile() as f: + if len(entries) > 0: + f.write('\n'.join([others, intro, + '\n'.join(entries), end]).encode()) + else: + f.write(others.encode()) + f.flush() + install_crontab(f.name, username) + +class gp_user_scripts_ext(gp_scripts_ext, gp_misc_applier): + def unapply(self, guid, attribute, entry): + others, entries = fetch_crontab(self.username) + if entry in entries: + entries.remove(entry) + install_user_crontab(self.username, others, entries) + self.cache_remove_attribute(guid, attribute) + + def apply(self, guid, attribute, entry): + old_val = self.cache_get_attribute_value(guid, attribute) + others, entries = fetch_crontab(self.username) + if not old_val or entry not in entries: + entries.append(entry) + install_user_crontab(self.username, others, entries) + self.cache_add_attribute(guid, attribute, entry) + + def process_group_policy(self, deleted_gpo_list, changed_gpo_list): + for guid, settings in deleted_gpo_list: + if str(self) in settings: + for attribute, entry in settings[str(self)].items(): + self.unapply(guid, attribute, entry) + + for gpo in changed_gpo_list: + if gpo.file_sys_path: + reg_key = 'Software\\Policies\\Samba\\Unix Settings' + sections = { '%s\\Daily Scripts' % reg_key : '@daily', + '%s\\Monthly Scripts' % reg_key : '@monthly', + '%s\\Weekly Scripts' % reg_key : '@weekly', + '%s\\Hourly Scripts' % reg_key : '@hourly' } + pol_file = 'USER/Registry.pol' + path = os.path.join(gpo.file_sys_path, pol_file) + pol_conf = drop_privileges('root', self.parse, path) + if not pol_conf: + continue + attrs = [] + for e in pol_conf.entries: + if e.keyname in sections.keys() and e.data.strip(): + cron_freq = sections[e.keyname] + attribute = '%s:%s' % (e.keyname, + self.generate_attribute(e.data)) + attrs.append(attribute) + entry = '%s %s' % (cron_freq, e.data) + self.apply(gpo.name, attribute, entry) + self.clean(gpo.name, keep=attrs) + + def rsop(self, gpo): + return super().rsop(gpo, target='USER') diff --git a/python/samba/gp/gp_sec_ext.py 
b/python/samba/gp/gp_sec_ext.py new file mode 100644 index 0000000..39b9cdc --- /dev/null +++ b/python/samba/gp/gp_sec_ext.py @@ -0,0 +1,221 @@ +# gp_sec_ext kdc gpo policy +# Copyright (C) Luke Morrison 2013 +# Copyright (C) David Mulder 2018 +# +# This program is free software; you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation; either version 3 of the License, or +# (at your option) any later version. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with this program. If not, see . + +import os.path +from samba.gp.gpclass import gp_inf_ext +from samba.auth import system_session +from samba.common import get_string +try: + from ldb import LdbError + from samba.samdb import SamDB +except ImportError: + pass +from samba.gp.util.logging import log + +def mins_to_hours(val): + return '%d' % (int(val) / 60) + +def days_to_hours(val): + return '%d' % (int(val) * 24) + +def days2rel_nttime(val): + seconds = 60 + minutes = 60 + hours = 24 + sam_add = 10000000 + val = int(val) + return str(-(val * seconds * minutes * hours * sam_add)) + +class gp_krb_ext(gp_inf_ext): + apply_map = { 'MaxTicketAge': 'kdc:user_ticket_lifetime', + 'MaxServiceAge': 'kdc:service_ticket_lifetime', + 'MaxRenewAge': 'kdc:renewal_lifetime' } + def process_group_policy(self, deleted_gpo_list, changed_gpo_list): + if self.lp.get('server role') != 'active directory domain controller': + return + inf_file = 'MACHINE/Microsoft/Windows NT/SecEdit/GptTmpl.inf' + for guid, settings in deleted_gpo_list: + self.gp_db.set_guid(guid) + for section in settings.keys(): + if section == str(self): + for att, value in settings[section].items(): + self.set_kdc_tdb(att, value) + self.gp_db.delete(section, att) + self.gp_db.commit() + + for gpo in changed_gpo_list: + if gpo.file_sys_path: + self.gp_db.set_guid(gpo.name) + path = os.path.join(gpo.file_sys_path, inf_file) + inf_conf = self.parse(path) + if not inf_conf: + continue + for section in inf_conf.sections(): + if section == str(self): + for key, value in inf_conf.items(section): + if key not in gp_krb_ext.apply_map: + continue + att = gp_krb_ext.apply_map[key] + value_func = self.mapper().get(att) + self.set_kdc_tdb(att, value_func(value)) + self.gp_db.commit() + + def set_kdc_tdb(self, attribute, val): + old_val = self.gp_db.gpostore.get(attribute) + log.info('%s was changed from %s to %s' % (attribute, old_val, val)) + if val is not None: + self.gp_db.gpostore.store(attribute, get_string(val)) + self.gp_db.store(str(self), attribute, get_string(old_val) + if old_val else None) + else: + self.gp_db.gpostore.delete(attribute) + self.gp_db.delete(str(self), attribute) + + def mapper(self): + return {'kdc:user_ticket_lifetime': lambda val: val, + 'kdc:service_ticket_lifetime': mins_to_hours, + 'kdc:renewal_lifetime': days_to_hours, + } + + def __str__(self): + return 'Kerberos Policy' + + def rsop(self, gpo): + output = {} + if self.lp.get('server role') != 'active directory domain controller': + return output + inf_file = 'MACHINE/Microsoft/Windows NT/SecEdit/GptTmpl.inf' + if gpo.file_sys_path: + path = os.path.join(gpo.file_sys_path, inf_file) + inf_conf = self.parse(path) + if not inf_conf: + return output + if 
str(self) in inf_conf.sections(): + section = str(self) + output[section] = {k: v for k, v in inf_conf.items(section) + if gp_krb_ext.apply_map.get(k)} + return output + + +class gp_access_ext(gp_inf_ext): + """This class takes the .inf file parameter (essentially a GPO file mapped + to a GUID), hashmaps it to the Samba parameter, which then uses an ldb + object to update the parameter to Samba4. Not registry oriented whatsoever. + """ + + def load_ldb(self): + try: + self.ldb = SamDB(self.lp.samdb_url(), + session_info=system_session(), + credentials=self.creds, + lp=self.lp) + except (NameError, LdbError): + raise Exception('Failed to load SamDB for assigning Group Policy') + + apply_map = { 'MinimumPasswordAge': 'minPwdAge', + 'MaximumPasswordAge': 'maxPwdAge', + 'MinimumPasswordLength': 'minPwdLength', + 'PasswordComplexity': 'pwdProperties' } + def process_group_policy(self, deleted_gpo_list, changed_gpo_list): + if self.lp.get('server role') != 'active directory domain controller': + return + self.load_ldb() + inf_file = 'MACHINE/Microsoft/Windows NT/SecEdit/GptTmpl.inf' + for guid, settings in deleted_gpo_list: + self.gp_db.set_guid(guid) + for section in settings.keys(): + if section == str(self): + for att, value in settings[section].items(): + update_samba, _ = self.mapper().get(att) + update_samba(att, value) + self.gp_db.delete(section, att) + self.gp_db.commit() + + for gpo in changed_gpo_list: + if gpo.file_sys_path: + self.gp_db.set_guid(gpo.name) + path = os.path.join(gpo.file_sys_path, inf_file) + inf_conf = self.parse(path) + if not inf_conf: + continue + for section in inf_conf.sections(): + if section == str(self): + for key, value in inf_conf.items(section): + if key not in gp_access_ext.apply_map: + continue + att = gp_access_ext.apply_map[key] + (update_samba, value_func) = self.mapper().get(att) + update_samba(att, value_func(value)) + self.gp_db.commit() + + def ch_minPwdAge(self, attribute, val): + old_val = self.ldb.get_minPwdAge() + log.info('KDC Minimum Password age was changed from %s to %s' + % (old_val, val)) + self.gp_db.store(str(self), attribute, str(old_val)) + self.ldb.set_minPwdAge(val) + + def ch_maxPwdAge(self, attribute, val): + old_val = self.ldb.get_maxPwdAge() + log.info('KDC Maximum Password age was changed from %s to %s' + % (old_val, val)) + self.gp_db.store(str(self), attribute, str(old_val)) + self.ldb.set_maxPwdAge(val) + + def ch_minPwdLength(self, attribute, val): + old_val = self.ldb.get_minPwdLength() + log.info('KDC Minimum Password length was changed from %s to %s' + % (old_val, val)) + self.gp_db.store(str(self), attribute, str(old_val)) + self.ldb.set_minPwdLength(val) + + def ch_pwdProperties(self, attribute, val): + old_val = self.ldb.get_pwdProperties() + log.info('KDC Password Properties were changed from %s to %s' + % (old_val, val)) + self.gp_db.store(str(self), attribute, str(old_val)) + self.ldb.set_pwdProperties(val) + + def mapper(self): + """ldap value : samba setter""" + return {"minPwdAge": (self.ch_minPwdAge, days2rel_nttime), + "maxPwdAge": (self.ch_maxPwdAge, days2rel_nttime), + # Could be none, but I like the method assignment in + # update_samba + "minPwdLength": (self.ch_minPwdLength, lambda val: val), + "pwdProperties": (self.ch_pwdProperties, lambda val: val), + + } + + def __str__(self): + return 'System Access' + + def rsop(self, gpo): + output = {} + if self.lp.get('server role') != 'active directory domain controller': + return output + inf_file = 'MACHINE/Microsoft/Windows NT/SecEdit/GptTmpl.inf' + if 
gpo.file_sys_path: + path = os.path.join(gpo.file_sys_path, inf_file) + inf_conf = self.parse(path) + if not inf_conf: + return output + if str(self) in inf_conf.sections(): + section = str(self) + output[section] = {k: v for k, v in inf_conf.items(section) + if gp_access_ext.apply_map.get(k)} + return output diff --git a/python/samba/gp/gp_smb_conf_ext.py b/python/samba/gp/gp_smb_conf_ext.py new file mode 100644 index 0000000..3ef9cfd --- /dev/null +++ b/python/samba/gp/gp_smb_conf_ext.py @@ -0,0 +1,127 @@ +# gp_smb_conf_ext smb.conf gpo policy +# Copyright (C) David Mulder 2018 +# +# This program is free software; you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation; either version 3 of the License, or +# (at your option) any later version. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with this program. If not, see . + +import os, numbers +from samba.gp.gpclass import gp_pol_ext, gp_misc_applier +from tempfile import NamedTemporaryFile +from samba.gp.util.logging import log + +def is_number(x): + return isinstance(x, numbers.Number) and \ + type(x) != bool + +class gp_smb_conf_ext(gp_pol_ext, gp_misc_applier): + def unapply(self, guid, attribute, val): + current = self.lp.get(attribute) + data = self.parse_value(val) + + # Only overwrite the smb.conf setting if it hasn't been modified. It + # may have been modified by another GPO. + if 'new_val' not in data or \ + self.lptype_to_string(current) == data['new_val']: + self.lp.set(attribute, self.regtype_to_lptype(data['old_val'], + current)) + self.store_lp_smb_conf(self.lp) + log.info('smb.conf [global] was changed', + { attribute : str(data['old_val']) }) + + self.cache_remove_attribute(guid, attribute) + + def apply(self, guid, attribute, val): + old_val = self.lp.get(attribute) + val = self.regtype_to_lptype(val, old_val) + + self.lp.set(attribute, val) + self.store_lp_smb_conf(self.lp) + log.info('smb.conf [global] was changed', { attribute : str(val) }) + + data = self.generate_value(old_val=self.lptype_to_string(old_val), + new_val=self.lptype_to_string(val)) + self.cache_add_attribute(guid, attribute, data) + + def process_group_policy(self, deleted_gpo_list, changed_gpo_list): + pol_file = 'MACHINE/Registry.pol' + for guid, settings in deleted_gpo_list: + smb_conf = settings.get('smb.conf') + if smb_conf is None: + continue + for key, value in smb_conf.items(): + self.unapply(guid, key, value) + + for gpo in changed_gpo_list: + if gpo.file_sys_path: + section_name = 'Software\\Policies\\Samba\\smb_conf' + path = os.path.join(gpo.file_sys_path, pol_file) + pol_conf = self.parse(path) + if not pol_conf: + continue + attrs = [] + for e in pol_conf.entries: + if not e.keyname.startswith(section_name): + continue + attrs.append(e.valuename) + self.apply(gpo.name, e.valuename, e.data) + + # Cleanup settings which were removed from the policy + self.clean(gpo.name, keep=attrs) + + def regtype_to_lptype(self, val, old_val): + if type(val) == bytes: + val = val.decode() + if is_number(val) and is_number(old_val): + val = str(val) + elif is_number(val) and type(old_val) == bool: + val = bool(val) + if type(val) == bool: + val = 'yes' if val else 'no' + return val + + def 
store_lp_smb_conf(self, lp): + with NamedTemporaryFile(delete=False, + dir=os.path.dirname(lp.configfile)) as f: + lp.dump(False, f.name) + mode = os.stat(lp.configfile).st_mode + os.chmod(f.name, mode) + os.rename(f.name, lp.configfile) + + def lptype_to_string(self, val): + if is_number(val): + val = str(val) + elif type(val) == bool: + val = 'yes' if val else 'no' + elif type(val) == list: + val = ' '.join(val) + return val + + def __str__(self): + return "smb.conf" + + def rsop(self, gpo): + output = {} + if gpo.file_sys_path: + section_name = 'Software\\Policies\\Samba\\smb_conf' + pol_file = 'MACHINE/Registry.pol' + path = os.path.join(gpo.file_sys_path, pol_file) + pol_conf = self.parse(path) + if not pol_conf: + return output + for e in pol_conf.entries: + if not e.keyname.startswith(section_name): + continue + if 'smb.conf' not in output.keys(): + output['smb.conf'] = {} + output['smb.conf'][e.valuename] = e.data + return output diff --git a/python/samba/gp/gp_sudoers_ext.py b/python/samba/gp/gp_sudoers_ext.py new file mode 100644 index 0000000..026aeba --- /dev/null +++ b/python/samba/gp/gp_sudoers_ext.py @@ -0,0 +1,116 @@ +# gp_sudoers_ext samba gpo policy +# Copyright (C) David Mulder 2020 +# +# This program is free software; you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation; either version 3 of the License, or +# (at your option) any later version. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with this program. If not, see . + +import os +from samba.gp.gpclass import gp_pol_ext, gp_file_applier +from tempfile import NamedTemporaryFile +from subprocess import Popen, PIPE +from samba.gp.util.logging import log + +def find_executable(executable, path): + paths = path.split(os.pathsep) + for p in paths: + f = os.path.join(p, executable) + if os.path.isfile(f): + return f + return None + +intro = ''' +### autogenerated by samba +# +# This file is generated by the gp_sudoers_ext Group Policy +# Client Side Extension. To modify the contents of this file, +# modify the appropriate Group Policy objects which apply +# to this machine. DO NOT MODIFY THIS FILE DIRECTLY. 
+# + +''' +visudo = find_executable('visudo', + path='%s:%s' % (os.environ['PATH'], '/usr/sbin')) + +def sudo_applier_func(sudo_dir, sudo_entries): + ret = [] + for p in sudo_entries: + contents = intro + contents += '%s\n' % p + with NamedTemporaryFile() as f: + with open(f.name, 'w') as w: + w.write(contents) + if visudo is None: + raise FileNotFoundError('visudo not found, please install it') + with Popen([visudo, '-c', '-f', f.name], + stdout=PIPE, stderr=PIPE) as proc: + sudo_validation = proc.wait() + if sudo_validation == 0: + with NamedTemporaryFile(prefix='gp_', + delete=False, + dir=sudo_dir) as f: + with open(f.name, 'w') as w: + w.write(contents) + ret.append(f.name) + else: + log.error('Sudoers apply failed', p) + return ret + +class gp_sudoers_ext(gp_pol_ext, gp_file_applier): + def __str__(self): + return 'Unix Settings/Sudo Rights' + + def process_group_policy(self, deleted_gpo_list, changed_gpo_list, + sdir='/etc/sudoers.d'): + for guid, settings in deleted_gpo_list: + if str(self) in settings: + for attribute, sudoers in settings[str(self)].items(): + self.unapply(guid, attribute, sudoers) + + for gpo in changed_gpo_list: + if gpo.file_sys_path: + section = 'Software\\Policies\\Samba\\Unix Settings\\Sudo Rights' + pol_file = 'MACHINE/Registry.pol' + path = os.path.join(gpo.file_sys_path, pol_file) + pol_conf = self.parse(path) + if not pol_conf: + continue + sudo_entries = [] + for e in pol_conf.entries: + if e.keyname == section and e.data.strip(): + sudo_entries.append(e.data) + # Each GPO applies only one set of sudoers, in a + # set of files, so the attribute does not need uniqueness. + attribute = self.generate_attribute(gpo.name) + # The value hash is generated from the sudo_entries, ensuring + # any changes to this GPO will cause the files to be rewritten. + value_hash = self.generate_value_hash(*sudo_entries) + self.apply(gpo.name, attribute, value_hash, sudo_applier_func, + sdir, sudo_entries) + # Cleanup any old entries that are no longer part of the policy + self.clean(gpo.name, keep=[attribute]) + + def rsop(self, gpo): + output = {} + pol_file = 'MACHINE/Registry.pol' + if gpo.file_sys_path: + path = os.path.join(gpo.file_sys_path, pol_file) + pol_conf = self.parse(path) + if not pol_conf: + return output + for e in pol_conf.entries: + key = e.keyname.split('\\')[-1] + if key.endswith('Sudo Rights') and e.data.strip(): + if key not in output.keys(): + output[key] = [] + output[key].append(e.data) + return output diff --git a/python/samba/gp/gpclass.py b/python/samba/gp/gpclass.py new file mode 100644 index 0000000..08be472 --- /dev/null +++ b/python/samba/gp/gpclass.py @@ -0,0 +1,1312 @@ +# Reads important GPO parameters and updates Samba +# Copyright (C) Luke Morrison 2013 +# +# This program is free software; you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation; either version 3 of the License, or +# (at your option) any later version. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with this program. If not, see . 
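+# Illustrative sketch (not part of the upstream patch): the
+# gp_pol_ext.read() method defined below unpacks a Registry.pol file with
+# the preg NDR parser. A standalone consumer of the same API would look
+# roughly like this:
+#
+#   from samba.dcerpc import preg
+#   from samba.ndr import ndr_unpack
+#
+#   with open('Registry.pol', 'rb') as f:
+#       pol = ndr_unpack(preg.file, f.read())
+#   for e in pol.entries:
+#       print(e.keyname, e.valuename, e.data)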
+
+
+import sys
+import os, shutil
+import errno
+import tdb
+import pwd
+sys.path.insert(0, "bin/python")
+from samba import WERRORError
+from configparser import ConfigParser
+from io import StringIO
+import traceback
+from samba.common import get_bytes
+from abc import ABCMeta, abstractmethod
+import xml.etree.ElementTree as etree
+import re
+from samba.net import Net
+from samba.dcerpc import nbt
+from samba.samba3 import libsmb_samba_internal as libsmb
+import samba.gpo as gpo
+from uuid import UUID
+from tempfile import NamedTemporaryFile
+from samba.dcerpc import preg
+from samba.ndr import ndr_unpack
+from samba.credentials import SMB_SIGNING_REQUIRED
+from samba.gp.util.logging import log
+from hashlib import blake2b
+import numbers
+from samba.common import get_string
+from samba.samdb import SamDB
+from samba.auth import system_session
+import ldb
+from samba.dsdb import UF_WORKSTATION_TRUST_ACCOUNT, UF_SERVER_TRUST_ACCOUNT, GPLINK_OPT_ENFORCE, GPLINK_OPT_DISABLE, GPO_BLOCK_INHERITANCE
+from samba.auth import AUTH_SESSION_INFO_DEFAULT_GROUPS, AUTH_SESSION_INFO_AUTHENTICATED, AUTH_SESSION_INFO_SIMPLE_PRIVILEGES
+from samba.dcerpc import security
+import samba.security
+from datetime import datetime
+
+
+try:
+    from enum import Enum
+    GPOSTATE = Enum('GPOSTATE', 'APPLY ENFORCE UNAPPLY')
+except ImportError:
+    class GPOSTATE:
+        APPLY = 1
+        ENFORCE = 2
+        UNAPPLY = 3
+
+
+class gp_log:
+    """ Log settings overwritten by gpo apply
+    The gp_log is an xml file that stores a history of gpo changes (and the
+    original setting value).
+
+    The log is organized like so:
+
+    <gp>
+        <user name="KDC-1$">
+            <applylog>
+                <guid count="0" value="{31B2F340-016D-11D2-945F-00C04FB984F9}"/>
+            </applylog>
+            <guid value="{31B2F340-016D-11D2-945F-00C04FB984F9}">
+                <gp_ext name="System Access">
+                    <attribute name="minPwdAge">-864000000000</attribute>
+                    <attribute name="maxPwdAge">-36288000000000</attribute>
+                    <attribute name="minPwdLength">7</attribute>
+                    <attribute name="pwdProperties">1</attribute>
+                </gp_ext>
+                <gp_ext name="Kerberos Policy">
+                    <attribute name="ticket_lifetime">1d</attribute>
+                    <attribute name="renew_lifetime"></attribute>
+                    <attribute name="clockskew">300</attribute>
+                </gp_ext>
+            </guid>
+        </user>
+    </gp>
+
+    Each guid value contains a list of extensions, which contain a list of
+    attributes. The guid value represents a GPO. The attributes are the values
+    of those settings prior to the application of the GPO.
+    The list of guids is enclosed within a user name, which represents the user
+    the settings were applied to. This user may be the samaccountname of the
+    local computer, which implies that these are machine policies.
+    The applylog keeps track of the order in which the GPOs were applied, so
+    that they can be rolled back in reverse, returning the machine to the state
+    prior to policy application.
+    """
+    def __init__(self, user, gpostore, db_log=None):
+        """ Initialize the gp_log
+        param user          - the username (or machine name) that policies are
+                              being applied to
+        param gpostore      - the GPOStorage obj which references the tdb which
+                              contains gp_logs
+        param db_log        - (optional) a string to initialize the gp_log
+        """
+        self._state = GPOSTATE.APPLY
+        self.gpostore = gpostore
+        self.username = user
+        if db_log:
+            self.gpdb = etree.fromstring(db_log)
+        else:
+            self.gpdb = etree.Element('gp')
+        self.user = user
+        user_obj = self.gpdb.find('user[@name="%s"]' % user)
+        if user_obj is None:
+            user_obj = etree.SubElement(self.gpdb, 'user')
+            user_obj.attrib['name'] = user
+
+    def state(self, value):
+        """ Policy application state
+        param value         - APPLY, ENFORCE, or UNAPPLY
+
+        The behavior of the gp_log depends on whether we are applying policy,
+        enforcing policy, or unapplying policy. During an apply, old settings
+        are recorded in the log. During an enforce, settings are being applied
+        but the gp_log does not change. During an unapply, additions to the log
+        should be ignored (since function calls to apply settings are actually
+        reverting policy), but removals from the log are allowed.
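+
+        For example, if ENFORCE is requested but the user's applylog is
+        empty (either nothing was ever applied, or everything was unapplied),
+        the state falls back to APPLY so that old values are recorded again
+        rather than assumed to already be in the log.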
+ """ + # If we're enforcing, but we've unapplied, apply instead + if value == GPOSTATE.ENFORCE: + user_obj = self.gpdb.find('user[@name="%s"]' % self.user) + apply_log = user_obj.find('applylog') + if apply_log is None or len(apply_log) == 0: + self._state = GPOSTATE.APPLY + else: + self._state = value + else: + self._state = value + + def get_state(self): + """Check the GPOSTATE + """ + return self._state + + def set_guid(self, guid): + """ Log to a different GPO guid + param guid - guid value of the GPO from which we're applying + policy + """ + self.guid = guid + user_obj = self.gpdb.find('user[@name="%s"]' % self.user) + obj = user_obj.find('guid[@value="%s"]' % guid) + if obj is None: + obj = etree.SubElement(user_obj, 'guid') + obj.attrib['value'] = guid + if self._state == GPOSTATE.APPLY: + apply_log = user_obj.find('applylog') + if apply_log is None: + apply_log = etree.SubElement(user_obj, 'applylog') + prev = apply_log.find('guid[@value="%s"]' % guid) + if prev is None: + item = etree.SubElement(apply_log, 'guid') + item.attrib['count'] = '%d' % (len(apply_log) - 1) + item.attrib['value'] = guid + + def store(self, gp_ext_name, attribute, old_val): + """ Store an attribute in the gp_log + param gp_ext_name - Name of the extension applying policy + param attribute - The attribute being modified + param old_val - The value of the attribute prior to policy + application + """ + if self._state == GPOSTATE.UNAPPLY or self._state == GPOSTATE.ENFORCE: + return None + user_obj = self.gpdb.find('user[@name="%s"]' % self.user) + guid_obj = user_obj.find('guid[@value="%s"]' % self.guid) + assert guid_obj is not None, "gpo guid was not set" + ext = guid_obj.find('gp_ext[@name="%s"]' % gp_ext_name) + if ext is None: + ext = etree.SubElement(guid_obj, 'gp_ext') + ext.attrib['name'] = gp_ext_name + attr = ext.find('attribute[@name="%s"]' % attribute) + if attr is None: + attr = etree.SubElement(ext, 'attribute') + attr.attrib['name'] = attribute + attr.text = old_val + + def retrieve(self, gp_ext_name, attribute): + """ Retrieve a stored attribute from the gp_log + param gp_ext_name - Name of the extension which applied policy + param attribute - The attribute being retrieved + return - The value of the attribute prior to policy + application + """ + user_obj = self.gpdb.find('user[@name="%s"]' % self.user) + guid_obj = user_obj.find('guid[@value="%s"]' % self.guid) + assert guid_obj is not None, "gpo guid was not set" + ext = guid_obj.find('gp_ext[@name="%s"]' % gp_ext_name) + if ext is not None: + attr = ext.find('attribute[@name="%s"]' % attribute) + if attr is not None: + return attr.text + return None + + def retrieve_all(self, gp_ext_name): + """ Retrieve all stored attributes for this user, GPO guid, and CSE + param gp_ext_name - Name of the extension which applied policy + return - The values of the attributes prior to policy + application + """ + user_obj = self.gpdb.find('user[@name="%s"]' % self.user) + guid_obj = user_obj.find('guid[@value="%s"]' % self.guid) + assert guid_obj is not None, "gpo guid was not set" + ext = guid_obj.find('gp_ext[@name="%s"]' % gp_ext_name) + if ext is not None: + attrs = ext.findall('attribute') + return {attr.attrib['name']: attr.text for attr in attrs} + return {} + + def get_applied_guids(self): + """ Return a list of applied ext guids + return - List of guids for gpos that have applied settings + to the system. 
+ """ + guids = [] + user_obj = self.gpdb.find('user[@name="%s"]' % self.user) + if user_obj is not None: + apply_log = user_obj.find('applylog') + if apply_log is not None: + guid_objs = apply_log.findall('guid[@count]') + guids_by_count = [(g.get('count'), g.get('value')) + for g in guid_objs] + guids_by_count.sort(reverse=True) + guids.extend(guid for count, guid in guids_by_count) + return guids + + def get_applied_settings(self, guids): + """ Return a list of applied ext guids + return - List of tuples containing the guid of a gpo, then + a dictionary of policies and their values prior + policy application. These are sorted so that the + most recently applied settings are removed first. + """ + ret = [] + user_obj = self.gpdb.find('user[@name="%s"]' % self.user) + for guid in guids: + guid_settings = user_obj.find('guid[@value="%s"]' % guid) + exts = guid_settings.findall('gp_ext') + settings = {} + for ext in exts: + attr_dict = {} + attrs = ext.findall('attribute') + for attr in attrs: + attr_dict[attr.attrib['name']] = attr.text + settings[ext.attrib['name']] = attr_dict + ret.append((guid, settings)) + return ret + + def delete(self, gp_ext_name, attribute): + """ Remove an attribute from the gp_log + param gp_ext_name - name of extension from which to remove the + attribute + param attribute - attribute to remove + """ + user_obj = self.gpdb.find('user[@name="%s"]' % self.user) + guid_obj = user_obj.find('guid[@value="%s"]' % self.guid) + assert guid_obj is not None, "gpo guid was not set" + ext = guid_obj.find('gp_ext[@name="%s"]' % gp_ext_name) + if ext is not None: + attr = ext.find('attribute[@name="%s"]' % attribute) + if attr is not None: + ext.remove(attr) + if len(ext) == 0: + guid_obj.remove(ext) + + def commit(self): + """ Write gp_log changes to disk """ + self.gpostore.store(self.username, etree.tostring(self.gpdb, 'utf-8')) + + +class GPOStorage: + def __init__(self, log_file): + if os.path.isfile(log_file): + self.log = tdb.open(log_file) + else: + self.log = tdb.Tdb(log_file, 0, tdb.DEFAULT, os.O_CREAT | os.O_RDWR) + + def start(self): + self.log.transaction_start() + + def get_int(self, key): + try: + return int(self.log.get(get_bytes(key))) + except TypeError: + return None + + def get(self, key): + return self.log.get(get_bytes(key)) + + def get_gplog(self, user): + return gp_log(user, self, self.log.get(get_bytes(user))) + + def store(self, key, val): + self.log.store(get_bytes(key), get_bytes(val)) + + def cancel(self): + self.log.transaction_cancel() + + def delete(self, key): + self.log.delete(get_bytes(key)) + + def commit(self): + self.log.transaction_commit() + + def __del__(self): + self.log.close() + + +class gp_ext(object): + __metaclass__ = ABCMeta + + def __init__(self, lp, creds, username, store): + self.lp = lp + self.creds = creds + self.username = username + self.gp_db = store.get_gplog(username) + + @abstractmethod + def process_group_policy(self, deleted_gpo_list, changed_gpo_list): + pass + + @abstractmethod + def read(self, policy): + pass + + def parse(self, afile): + local_path = self.lp.cache_path('gpo_cache') + data_file = os.path.join(local_path, check_safe_path(afile).upper()) + if os.path.exists(data_file): + return self.read(data_file) + return None + + @abstractmethod + def __str__(self): + pass + + @abstractmethod + def rsop(self, gpo): + return {} + + +class gp_inf_ext(gp_ext): + def read(self, data_file): + with open(data_file, 'rb') as f: + policy = f.read() + inf_conf = ConfigParser(interpolation=None) + inf_conf.optionxform = 
str + try: + inf_conf.read_file(StringIO(policy.decode())) + except UnicodeDecodeError: + inf_conf.read_file(StringIO(policy.decode('utf-16'))) + return inf_conf + + +class gp_pol_ext(gp_ext): + def read(self, data_file): + with open(data_file, 'rb') as f: + raw = f.read() + return ndr_unpack(preg.file, raw) + + +class gp_xml_ext(gp_ext): + def read(self, data_file): + with open(data_file, 'rb') as f: + raw = f.read() + try: + return etree.fromstring(raw.decode()) + except UnicodeDecodeError: + return etree.fromstring(raw.decode('utf-16')) + + +class gp_applier(object): + """Group Policy Applier/Unapplier/Modifier + The applier defines functions for monitoring policy application, + removal, and modification. It must be a multi-derived class paired + with a subclass of gp_ext. + """ + __metaclass__ = ABCMeta + + def cache_add_attribute(self, guid, attribute, value): + """Add an attribute and value to the Group Policy cache + guid - The GPO guid which applies this policy + attribute - The attribute name of the policy being applied + value - The value of the policy being applied + + Normally called by the subclass apply() function after applying policy. + """ + self.gp_db.set_guid(guid) + self.gp_db.store(str(self), attribute, value) + self.gp_db.commit() + + def cache_remove_attribute(self, guid, attribute): + """Remove an attribute from the Group Policy cache + guid - The GPO guid which applies this policy + attribute - The attribute name of the policy being unapplied + + Normally called by the subclass unapply() function when removing old + policy. + """ + self.gp_db.set_guid(guid) + self.gp_db.delete(str(self), attribute) + self.gp_db.commit() + + def cache_get_attribute_value(self, guid, attribute): + """Retrieve the value stored in the cache for the given attribute + guid - The GPO guid which applies this policy + attribute - The attribute name of the policy + """ + self.gp_db.set_guid(guid) + return self.gp_db.retrieve(str(self), attribute) + + def cache_get_all_attribute_values(self, guid): + """Retrieve all attribute/values currently stored for this gpo+policy + guid - The GPO guid which applies this policy + """ + self.gp_db.set_guid(guid) + return self.gp_db.retrieve_all(str(self)) + + def cache_get_apply_state(self): + """Return the current apply state + return - APPLY|ENFORCE|UNAPPLY + """ + return self.gp_db.get_state() + + def generate_attribute(self, name, *args): + """Generate an attribute name from arbitrary data + name - A name to ensure uniqueness + args - Any arbitrary set of args, str or bytes + return - A blake2b digest of the data, the attribute + + The importance here is the digest of the data makes the attribute + reproducible and uniquely identifies it. Hashing the name with + the data ensures we don't falsely identify a match which is the same + text in a different file. Using this attribute generator is optional. 
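+
+        A minimal doctest-style illustration (an editor's sketch of the
+        digest this method computes, assuming simple byte-string inputs):
+
+        >>> from hashlib import blake2b
+        >>> a = blake2b(b'motd' + b'data').hexdigest()
+        >>> b = blake2b(b'issue' + b'data').hexdigest()
+        >>> a == b
+        False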
+ """ + data = b''.join([get_bytes(arg) for arg in [*args]]) + return blake2b(get_bytes(name)+data).hexdigest() + + def generate_value_hash(self, *args): + """Generate a unique value which identifies value changes + args - Any arbitrary set of args, str or bytes + return - A blake2b digest of the data, the value represented + """ + data = b''.join([get_bytes(arg) for arg in [*args]]) + return blake2b(data).hexdigest() + + @abstractmethod + def unapply(self, guid, attribute, value): + """Group Policy Unapply + guid - The GPO guid which applies this policy + attribute - The attribute name of the policy being unapplied + value - The value of the policy being unapplied + """ + pass + + @abstractmethod + def apply(self, guid, attribute, applier_func, *args): + """Group Policy Apply + guid - The GPO guid which applies this policy + attribute - The attribute name of the policy being applied + applier_func - An applier function which takes variable args + args - The variable arguments to pass to applier_func + + The applier_func function MUST return the value of the policy being + applied. It's important that implementations of `apply` check for and + first unapply any changed policy. See for example calls to + `cache_get_all_attribute_values()` which searches for all policies + applied by this GPO for this Client Side Extension (CSE). + """ + pass + + def clean(self, guid, keep=None, remove=None, **kwargs): + """Cleanup old removed attributes + keep - A list of attributes to keep + remove - A single attribute to remove, or a list of attributes to + remove + kwargs - Additional keyword args required by the subclass unapply + function + + This is only necessary for CSEs which provide multiple attributes. + """ + # Clean syntax is, either provide a single remove attribute, + # or a list of either removal attributes or keep attributes. + if keep is None: + keep = [] + if remove is None: + remove = [] + + if type(remove) != list: + value = self.cache_get_attribute_value(guid, remove) + if value is not None: + self.unapply(guid, remove, value, **kwargs) + else: + old_vals = self.cache_get_all_attribute_values(guid) + for attribute, value in old_vals.items(): + if (len(remove) > 0 and attribute in remove) or \ + (len(keep) > 0 and attribute not in keep): + self.unapply(guid, attribute, value, **kwargs) + + +class gp_misc_applier(gp_applier): + """Group Policy Miscellaneous Applier/Unapplier/Modifier + """ + + def generate_value(self, **kwargs): + data = etree.Element('data') + for k, v in kwargs.items(): + arg = etree.SubElement(data, k) + arg.text = get_string(v) + return get_string(etree.tostring(data, 'utf-8')) + + def parse_value(self, value): + vals = {} + try: + data = etree.fromstring(value) + except etree.ParseError: + # If parsing fails, then it's an old cache value + return {'old_val': value} + except TypeError: + return {} + itr = data.iter() + next(itr) # Skip the top element + for item in itr: + vals[item.tag] = item.text + return vals + + +class gp_file_applier(gp_applier): + """Group Policy File Applier/Unapplier/Modifier + Subclass of abstract class gp_applier for monitoring policy applied + via a file. + """ + + def __generate_value(self, value_hash, files, sep): + data = [value_hash] + data.extend(files) + return sep.join(data) + + def __parse_value(self, value, sep): + """Parse a value + return - A unique HASH, followed by the file list + """ + if value is None: + return None, [] + data = value.split(sep) + if '/' in data[0]: + # The first element is not a hash, but a filename. 
This is a + # legacy value. + return None, data + else: + return data[0], data[1:] if len(data) > 1 else [] + + def unapply(self, guid, attribute, files, sep=':'): + # If the value isn't a list of files, parse value from the log + if type(files) != list: + _, files = self.__parse_value(files, sep) + for file in files: + if os.path.exists(file): + os.unlink(file) + self.cache_remove_attribute(guid, attribute) + + def apply(self, guid, attribute, value_hash, applier_func, *args, sep=':'): + """ + applier_func MUST return a list of files created by the applier. + + This applier is for policies which only apply to a single file (with + a couple small exceptions). This applier will remove any policy applied + by this GPO which doesn't match the new policy. + """ + # If the policy has changed, unapply, then apply new policy + old_val = self.cache_get_attribute_value(guid, attribute) + # Ignore removal if this policy is applied and hasn't changed + old_val_hash, old_val_files = self.__parse_value(old_val, sep) + if (old_val_hash != value_hash or + self.cache_get_apply_state() == GPOSTATE.ENFORCE) or \ + not all([os.path.exists(f) for f in old_val_files]): + self.unapply(guid, attribute, old_val_files) + else: + # If policy is already applied, skip application + return + + # Apply the policy and log the changes + files = applier_func(*args) + new_value = self.__generate_value(value_hash, files, sep) + self.cache_add_attribute(guid, attribute, new_value) + + +""" Fetch the hostname of a writable DC """ + + +def get_dc_hostname(creds, lp): + net = Net(creds=creds, lp=lp) + cldap_ret = net.finddc(domain=lp.get('realm'), flags=(nbt.NBT_SERVER_LDAP | + nbt.NBT_SERVER_DS)) + return cldap_ret.pdc_dns_name + + +""" Fetch a list of GUIDs for applicable GPOs """ + + +def get_gpo(samdb, gpo_dn): + g = gpo.GROUP_POLICY_OBJECT() + attrs = [ + "cn", + "displayName", + "flags", + "gPCFileSysPath", + "gPCFunctionalityVersion", + "gPCMachineExtensionNames", + "gPCUserExtensionNames", + "gPCWQLFilter", + "name", + "nTSecurityDescriptor", + "versionNumber" + ] + if gpo_dn.startswith("LDAP://"): + gpo_dn = gpo_dn.lstrip("LDAP://") + + sd_flags = (security.SECINFO_OWNER | + security.SECINFO_GROUP | + security.SECINFO_DACL) + try: + res = samdb.search(gpo_dn, ldb.SCOPE_BASE, "(objectclass=*)", attrs, + controls=['sd_flags:1:%d' % sd_flags]) + except Exception: + log.error('Failed to fetch gpo object with nTSecurityDescriptor') + raise + if res.count != 1: + raise ldb.LdbError(ldb.ERR_NO_SUCH_OBJECT, + 'get_gpo: search failed') + + g.ds_path = gpo_dn + if 'versionNumber' in res.msgs[0].keys(): + g.version = int(res.msgs[0]['versionNumber'][0]) + if 'flags' in res.msgs[0].keys(): + g.options = int(res.msgs[0]['flags'][0]) + if 'gPCFileSysPath' in res.msgs[0].keys(): + g.file_sys_path = res.msgs[0]['gPCFileSysPath'][0].decode() + if 'displayName' in res.msgs[0].keys(): + g.display_name = res.msgs[0]['displayName'][0].decode() + if 'name' in res.msgs[0].keys(): + g.name = res.msgs[0]['name'][0].decode() + if 'gPCMachineExtensionNames' in res.msgs[0].keys(): + g.machine_extensions = str(res.msgs[0]['gPCMachineExtensionNames'][0]) + if 'gPCUserExtensionNames' in res.msgs[0].keys(): + g.user_extensions = str(res.msgs[0]['gPCUserExtensionNames'][0]) + if 'nTSecurityDescriptor' in res.msgs[0].keys(): + g.set_sec_desc(bytes(res.msgs[0]['nTSecurityDescriptor'][0])) + return g + +class GP_LINK: + def __init__(self, gPLink, gPOptions): + self.link_names = [] + self.link_opts = [] + self.gpo_parse_gplink(gPLink) + self.gp_opts = 
int(gPOptions) + + def gpo_parse_gplink(self, gPLink): + for p in gPLink.decode().split(']'): + if not p: + continue + log.debug('gpo_parse_gplink: processing link') + p = p.lstrip('[') + link_name, link_opt = p.split(';') + log.debug('gpo_parse_gplink: link: {}'.format(link_name)) + log.debug('gpo_parse_gplink: opt: {}'.format(link_opt)) + self.link_names.append(link_name) + self.link_opts.append(int(link_opt)) + + def num_links(self): + if len(self.link_names) != len(self.link_opts): + raise RuntimeError('Link names and opts mismatch') + return len(self.link_names) + +def find_samaccount(samdb, samaccountname): + attrs = ['dn', 'userAccountControl'] + res = samdb.search(samdb.get_default_basedn(), ldb.SCOPE_SUBTREE, + '(sAMAccountName={})'.format(samaccountname), attrs) + if res.count != 1: + raise ldb.LdbError(ldb.ERR_NO_SUCH_OBJECT, + "Failed to find samAccountName '{}'".format(samaccountname) + ) + uac = int(res.msgs[0]['userAccountControl'][0]) + dn = res.msgs[0]['dn'] + log.info('Found dn {} for samaccountname {}'.format(dn, samaccountname)) + return uac, dn + +def get_gpo_link(samdb, link_dn): + res = samdb.search(link_dn, ldb.SCOPE_BASE, + '(objectclass=*)', ['gPLink', 'gPOptions']) + if res.count != 1: + raise ldb.LdbError(ldb.ERR_NO_SUCH_OBJECT, 'get_gpo_link: no result') + if 'gPLink' not in res.msgs[0]: + raise ldb.LdbError(ldb.ERR_NO_SUCH_ATTRIBUTE, + "get_gpo_link: no 'gPLink' attribute found for '{}'".format(link_dn) + ) + gPLink = res.msgs[0]['gPLink'][0] + gPOptions = 0 + if 'gPOptions' in res.msgs[0]: + gPOptions = res.msgs[0]['gPOptions'][0] + else: + log.debug("get_gpo_link: no 'gPOptions' attribute found") + return GP_LINK(gPLink, gPOptions) + +def add_gplink_to_gpo_list(samdb, gpo_list, forced_gpo_list, link_dn, gp_link, + link_type, only_add_forced_gpos, token): + for i in range(gp_link.num_links()-1, -1, -1): + is_forced = (gp_link.link_opts[i] & GPLINK_OPT_ENFORCE) != 0 + if gp_link.link_opts[i] & GPLINK_OPT_DISABLE: + log.debug('skipping disabled GPO') + continue + + if only_add_forced_gpos: + if not is_forced: + log.debug("skipping nonenforced GPO link " + "because GPOPTIONS_BLOCK_INHERITANCE " + "has been set") + continue + else: + log.debug("adding enforced GPO link although " + "the GPOPTIONS_BLOCK_INHERITANCE " + "has been set") + + try: + new_gpo = get_gpo(samdb, gp_link.link_names[i]) + except ldb.LdbError as e: + (enum, estr) = e.args + log.debug("failed to get gpo: %s" % gp_link.link_names[i]) + if enum == ldb.ERR_NO_SUCH_OBJECT: + log.debug("skipping empty gpo: %s" % gp_link.link_names[i]) + continue + return + else: + try: + sec_desc = ndr_unpack(security.descriptor, + new_gpo.get_sec_desc_buf()) + samba.security.access_check(sec_desc, token, + security.SEC_STD_READ_CONTROL | + security.SEC_ADS_LIST | + security.SEC_ADS_READ_PROP) + except Exception as e: + log.debug("skipping GPO \"%s\" as object " + "has no access to it" % new_gpo.display_name) + continue + + new_gpo.link = str(link_dn) + new_gpo.link_type = link_type + + if is_forced: + forced_gpo_list.insert(0, new_gpo) + else: + gpo_list.insert(0, new_gpo) + + log.debug("add_gplink_to_gpo_list: added GPLINK #%d %s " + "to GPO list" % (i, gp_link.link_names[i])) + +def merge_with_system_token(token_1): + sids = token_1.sids + system_token = system_session().security_token + sids.extend(system_token.sids) + token_1.sids = sids + token_1.rights_mask |= system_token.rights_mask + token_1.privilege_mask |= system_token.privilege_mask + # There are no claims in the system token, so it is safe not to 
merge the claims
+    return token_1
+
+
+def site_dn_for_machine(samdb, dc_hostname, lp, creds, hostname):
+    # [MS-GPOL] 3.2.5.1.4 Site Search
+
+    # netr_DsRGetSiteName() would need to run over local RPC, but we do not
+    # have that call implemented in our rpc_server.
+    # What netr_DsRGetSiteName() actually does is an LDAP query to get
+    # the site name, so we can do the same.
+
+    # NtVer=(NETLOGON_NT_VERSION_IP|NETLOGON_NT_VERSION_WITH_CLOSEST_SITE|
+    # NETLOGON_NT_VERSION_5EX) [0x20000014]
+    expr = "(&(DnsDomain=%s.)(User=%s)(NtVer=\\14\\00\\00\\20))" % (
+        samdb.domain_dns_name(),
+        hostname)
+    res = samdb.search(
+        base='',
+        scope=ldb.SCOPE_BASE,
+        expression=expr,
+        attrs=["Netlogon"])
+    if res.count != 1:
+        raise RuntimeError('site_dn_for_machine: No result')
+
+    samlogon_response = ndr_unpack(nbt.netlogon_samlogon_response,
+                                   bytes(res.msgs[0]['Netlogon'][0]))
+    if samlogon_response.ntver not in [nbt.NETLOGON_NT_VERSION_5EX,
+                                       (nbt.NETLOGON_NT_VERSION_1
+                                        | nbt.NETLOGON_NT_VERSION_5EX)]:
+        raise RuntimeError('site_dn_for_machine: Invalid NtVer in '
+                           'netlogon_samlogon_response')
+
+    # We want NETLOGON_NT_VERSION_5EX out of the union!
+    samlogon_response.ntver = nbt.NETLOGON_NT_VERSION_5EX
+    samlogon_response_ex = samlogon_response.data
+
+    client_site = "Default-First-Site-Name"
+    if (samlogon_response_ex.client_site
+            and len(samlogon_response_ex.client_site) > 1):
+        client_site = samlogon_response_ex.client_site
+
+    site_dn = samdb.get_config_basedn()
+    site_dn.add_child("CN=Sites")
+    site_dn.add_child("CN=%s" % (client_site))
+
+    return site_dn
+
+
+def get_gpo_list(dc_hostname, creds, lp, username):
+    """Get the full list of GROUP_POLICY_OBJECTs for a given username.
+    Push GPOs to gpo_list so that the traversal order of the list matches
+    the order of application:
+    (L)ocal (S)ite (D)omain (O)rganizational(U)nit
+    For different domains and OUs: parent-to-child.
+    Within the same level of domains and OUs: link order.
+    Since GPOs are pushed to the front of gpo_list, GPOs have to be
+    pushed in the opposite order of application (OUs first, local last,
+    child-to-parent).
+    Forced GPOs are appended at the end since they override all others.
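+
+    As an illustration (hypothetical GPOs): given a site GPO S, an enforced
+    domain GPO D* and an OU GPO O, the returned order is
+    Local, S, O, D*; the enforced D* is appended last, so it overrides even
+    the more specific OU policy.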
+    """
+    gpo_list = []
+    forced_gpo_list = []
+    url = 'ldap://' + dc_hostname
+    samdb = SamDB(url=url,
+                  session_info=system_session(),
+                  credentials=creds, lp=lp)
+    # username arrives as DOM\\SAM; find_samaccount() expects just SAM
+    uac, dn = find_samaccount(samdb, username.split('\\')[-1])
+    add_only_forced_gpos = False
+
+    # Fetch the security token
+    session_info_flags = (AUTH_SESSION_INFO_DEFAULT_GROUPS |
+                          AUTH_SESSION_INFO_AUTHENTICATED)
+    if url.startswith('ldap'):
+        session_info_flags |= AUTH_SESSION_INFO_SIMPLE_PRIVILEGES
+    session = samba.auth.user_session(samdb, lp_ctx=lp, dn=dn,
+                                      session_info_flags=session_info_flags)
+    gpo_list_machine = False
+    if uac & UF_WORKSTATION_TRUST_ACCOUNT or uac & UF_SERVER_TRUST_ACCOUNT:
+        gpo_list_machine = True
+        token = merge_with_system_token(session.security_token)
+    else:
+        token = session.security_token
+
+    # (O)rganizational(U)nit
+    parent_dn = dn.parent()
+    while True:
+        if str(parent_dn) == str(samdb.get_default_basedn().parent()):
+            break
+
+        # An account can be a member of multiple OUs
+        if parent_dn.get_component_name(0) == 'OU':
+            try:
+                log.debug("get_gpo_list: query OU: [%s] for GPOs" % parent_dn)
+                gp_link = get_gpo_link(samdb, parent_dn)
+            except ldb.LdbError as e:
+                (enum, estr) = e.args
+                log.debug(estr)
+            else:
+                add_gplink_to_gpo_list(samdb, gpo_list, forced_gpo_list,
+                                       parent_dn, gp_link,
+                                       gpo.GP_LINK_OU,
+                                       add_only_forced_gpos, token)
+
+                # block inheritance from now on
+                if gp_link.gp_opts & GPO_BLOCK_INHERITANCE:
+                    add_only_forced_gpos = True
+
+        parent_dn = parent_dn.parent()
+
+    # (D)omain
+    parent_dn = dn.parent()
+    while True:
+        if str(parent_dn) == str(samdb.get_default_basedn().parent()):
+            break
+
+        # An account can only be a member of one domain
+        if parent_dn.get_component_name(0) == 'DC':
+            try:
+                log.debug("get_gpo_list: query DC: [%s] for GPOs" % parent_dn)
+                gp_link = get_gpo_link(samdb, parent_dn)
+            except ldb.LdbError as e:
+                (enum, estr) = e.args
+                log.debug(estr)
+            else:
+                add_gplink_to_gpo_list(samdb, gpo_list, forced_gpo_list,
+                                       parent_dn, gp_link,
+                                       gpo.GP_LINK_DOMAIN,
+                                       add_only_forced_gpos, token)
+
+                # block inheritance from now on
+                if gp_link.gp_opts & GPO_BLOCK_INHERITANCE:
+                    add_only_forced_gpos = True
+
+        parent_dn = parent_dn.parent()
+
+    # (S)ite
+    if gpo_list_machine:
+        try:
+            site_dn = site_dn_for_machine(samdb, dc_hostname, lp, creds, username)
+
+            try:
+                log.debug("get_gpo_list: query SITE: [%s] for GPOs" % site_dn)
+                gp_link = get_gpo_link(samdb, site_dn)
+            except ldb.LdbError as e:
+                (enum, estr) = e.args
+                log.debug(estr)
+            else:
+                add_gplink_to_gpo_list(samdb, gpo_list, forced_gpo_list,
+                                       site_dn, gp_link,
+                                       gpo.GP_LINK_SITE,
+                                       add_only_forced_gpos, token)
+        except ldb.LdbError:
+            # [MS-GPOL] 3.2.5.1.4 Site Search: If the method returns
+            # ERROR_NO_SITENAME, the remainder of this message MUST be skipped
+            # and the protocol sequence MUST continue at GPO Search
+            pass
+
+    # (L)ocal
+    gpo_list.insert(0, gpo.GROUP_POLICY_OBJECT("Local Policy",
+                                               "Local Policy",
+                                               gpo.GP_LINK_LOCAL))
+
+    # Append |forced_gpo_list| at the end of |gpo_list|,
+    # so that forced GPOs are applied on top of non-enforced GPOs.
+ return gpo_list+forced_gpo_list + + +def cache_gpo_dir(conn, cache, sub_dir): + loc_sub_dir = sub_dir.upper() + local_dir = os.path.join(cache, loc_sub_dir) + try: + os.makedirs(local_dir, mode=0o755) + except OSError as e: + if e.errno != errno.EEXIST: + raise + for fdata in conn.list(sub_dir): + if fdata['attrib'] & libsmb.FILE_ATTRIBUTE_DIRECTORY: + cache_gpo_dir(conn, cache, os.path.join(sub_dir, fdata['name'])) + else: + local_name = fdata['name'].upper() + f = NamedTemporaryFile(delete=False, dir=local_dir) + fname = os.path.join(sub_dir, fdata['name']).replace('/', '\\') + f.write(conn.loadfile(fname)) + f.close() + os.rename(f.name, os.path.join(local_dir, local_name)) + + +def check_safe_path(path): + dirs = re.split('/|\\\\', path) + if 'sysvol' in path.lower(): + ldirs = re.split('/|\\\\', path.lower()) + dirs = dirs[ldirs.index('sysvol') + 1:] + if '..' not in dirs: + return os.path.join(*dirs) + raise OSError(path) + + +def check_refresh_gpo_list(dc_hostname, lp, creds, gpos): + # Force signing for the connection + saved_signing_state = creds.get_smb_signing() + creds.set_smb_signing(SMB_SIGNING_REQUIRED) + conn = libsmb.Conn(dc_hostname, 'sysvol', lp=lp, creds=creds) + # Reset signing state + creds.set_smb_signing(saved_signing_state) + cache_path = lp.cache_path('gpo_cache') + for gpo_obj in gpos: + if not gpo_obj.file_sys_path: + continue + cache_gpo_dir(conn, cache_path, check_safe_path(gpo_obj.file_sys_path)) + + +def get_deleted_gpos_list(gp_db, gpos): + applied_gpos = gp_db.get_applied_guids() + current_guids = set([p.name for p in gpos]) + deleted_gpos = [guid for guid in applied_gpos if guid not in current_guids] + return gp_db.get_applied_settings(deleted_gpos) + +def gpo_version(lp, path): + # gpo.gpo_get_sysvol_gpt_version() reads the GPT.INI from a local file, + # read from the gpo client cache. 
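+    # For example (hypothetical GPO), a file_sys_path reduced by
+    # check_safe_path() to 'EXAMPLE.COM/POLICIES/{31B2F340-016D-11D2-945F-00C04FB984F9}'
+    # is resolved beneath lp.cache_path('gpo_cache'), where
+    # gpo_get_sysvol_gpt_version() then reads the cached GPT.INI.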
+    gpt_path = lp.cache_path(os.path.join('gpo_cache', path))
+    return int(gpo.gpo_get_sysvol_gpt_version(gpt_path)[1])
+
+
+def apply_gp(lp, creds, store, gp_extensions, username, target, force=False):
+    gp_db = store.get_gplog(username)
+    dc_hostname = get_dc_hostname(creds, lp)
+    gpos = get_gpo_list(dc_hostname, creds, lp, username)
+    del_gpos = get_deleted_gpos_list(gp_db, gpos)
+    try:
+        check_refresh_gpo_list(dc_hostname, lp, creds, gpos)
+    except Exception:
+        log.error('Failed downloading gpt cache from \'%s\' using SMB'
+                  % dc_hostname)
+        return
+
+    if force:
+        changed_gpos = gpos
+        gp_db.state(GPOSTATE.ENFORCE)
+    else:
+        changed_gpos = []
+        for gpo_obj in gpos:
+            if not gpo_obj.file_sys_path:
+                continue
+            guid = gpo_obj.name
+            path = check_safe_path(gpo_obj.file_sys_path).upper()
+            version = gpo_version(lp, path)
+            if version != store.get_int(guid):
+                log.info('GPO %s has changed' % guid)
+                changed_gpos.append(gpo_obj)
+        gp_db.state(GPOSTATE.APPLY)
+
+    store.start()
+    for ext in gp_extensions:
+        try:
+            ext = ext(lp, creds, username, store)
+            if target == 'Computer':
+                ext.process_group_policy(del_gpos, changed_gpos)
+            else:
+                drop_privileges(username, ext.process_group_policy,
+                                del_gpos, changed_gpos)
+        except Exception as e:
+            log.error('Failed to apply extension %s' % str(ext))
+            _, _, tb = sys.exc_info()
+            filename, line_number, _, _ = traceback.extract_tb(tb)[-1]
+            log.error('%s:%d: %s: %s' % (filename, line_number,
+                                         type(e).__name__, str(e)))
+            continue
+    for gpo_obj in gpos:
+        if not gpo_obj.file_sys_path:
+            continue
+        guid = gpo_obj.name
+        path = check_safe_path(gpo_obj.file_sys_path).upper()
+        version = gpo_version(lp, path)
+        store.store(guid, '%i' % version)
+    store.commit()
+
+
+def unapply_gp(lp, creds, store, gp_extensions, username, target):
+    gp_db = store.get_gplog(username)
+    gp_db.state(GPOSTATE.UNAPPLY)
+    # Treat all applied gpos as deleted
+    del_gpos = gp_db.get_applied_settings(gp_db.get_applied_guids())
+    store.start()
+    for ext in gp_extensions:
+        try:
+            ext = ext(lp, creds, username, store)
+            if target == 'Computer':
+                ext.process_group_policy(del_gpos, [])
+            else:
+                drop_privileges(username, ext.process_group_policy,
+                                del_gpos, [])
+        except Exception as e:
+            log.error('Failed to unapply extension %s' % str(ext))
+            log.error('Message was: ' + str(e))
+            continue
+    store.commit()
+
+
+def __rsop_vals(vals, level=4):
+    if isinstance(vals, dict):
+        ret = [' '*level + '[ %s ] = %s' % (k, __rsop_vals(v, level+2))
+               for k, v in vals.items()]
+        return '\n' + '\n'.join(ret)
+    elif isinstance(vals, list):
+        ret = [' '*level + '[ %s ]' % __rsop_vals(v, level+2) for v in vals]
+        return '\n' + '\n'.join(ret)
+    else:
+        if isinstance(vals, numbers.Number):
+            return ' '*(level+2) + str(vals)
+        else:
+            return ' '*(level+2) + get_string(vals)
+
+def rsop(lp, creds, store, gp_extensions, username, target):
+    dc_hostname = get_dc_hostname(creds, lp)
+    gpos = get_gpo_list(dc_hostname, creds, lp, username)
+    check_refresh_gpo_list(dc_hostname, lp, creds, gpos)
+
+    print('Resultant Set of Policy')
+    print('%s Policy\n' % target)
+    term_width = shutil.get_terminal_size(fallback=(120, 50))[0]
+    for gpo_obj in gpos:
+        if gpo_obj.display_name.strip() == 'Local Policy':
+            continue  # We never apply local policy
+        print('GPO: %s' % gpo_obj.display_name)
+        print('='*term_width)
+        for ext in gp_extensions:
+            ext = ext(lp, creds, username, store)
+            cse_name_m = re.findall(r"'([\w\.]+)'", str(type(ext)))
+            if len(cse_name_m) > 0:
+                cse_name = cse_name_m[-1].split('.')[-1]
+            else:
+                cse_name =
ext.__module__.split('.')[-1] + print(' CSE: %s' % cse_name) + print(' ' + ('-'*int(term_width/2))) + for section, settings in ext.rsop(gpo_obj).items(): + print(' Policy Type: %s' % section) + print(' ' + ('-'*int(term_width/2))) + print(__rsop_vals(settings).lstrip('\n')) + print(' ' + ('-'*int(term_width/2))) + print(' ' + ('-'*int(term_width/2))) + print('%s\n' % ('='*term_width)) + + +def parse_gpext_conf(smb_conf): + from samba.samba3 import param as s3param + lp = s3param.get_context() + if smb_conf is not None: + lp.load(smb_conf) + else: + lp.load_default() + ext_conf = lp.state_path('gpext.conf') + parser = ConfigParser(interpolation=None) + parser.read(ext_conf) + return lp, parser + + +def atomic_write_conf(lp, parser): + ext_conf = lp.state_path('gpext.conf') + with NamedTemporaryFile(mode="w+", delete=False, dir=os.path.dirname(ext_conf)) as f: + parser.write(f) + os.rename(f.name, ext_conf) + + +def check_guid(guid): + # Check for valid guid with curly braces + if guid[0] != '{' or guid[-1] != '}' or len(guid) != 38: + return False + try: + UUID(guid, version=4) + except ValueError: + return False + return True + + +def register_gp_extension(guid, name, path, + smb_conf=None, machine=True, user=True): + # Check that the module exists + if not os.path.exists(path): + return False + if not check_guid(guid): + return False + + lp, parser = parse_gpext_conf(smb_conf) + if guid not in parser.sections(): + parser.add_section(guid) + parser.set(guid, 'DllName', path) + parser.set(guid, 'ProcessGroupPolicy', name) + parser.set(guid, 'NoMachinePolicy', "0" if machine else "1") + parser.set(guid, 'NoUserPolicy', "0" if user else "1") + + atomic_write_conf(lp, parser) + + return True + + +def list_gp_extensions(smb_conf=None): + _, parser = parse_gpext_conf(smb_conf) + results = {} + for guid in parser.sections(): + results[guid] = {} + results[guid]['DllName'] = parser.get(guid, 'DllName') + results[guid]['ProcessGroupPolicy'] = \ + parser.get(guid, 'ProcessGroupPolicy') + results[guid]['MachinePolicy'] = \ + not int(parser.get(guid, 'NoMachinePolicy')) + results[guid]['UserPolicy'] = not int(parser.get(guid, 'NoUserPolicy')) + return results + + +def unregister_gp_extension(guid, smb_conf=None): + if not check_guid(guid): + return False + + lp, parser = parse_gpext_conf(smb_conf) + if guid in parser.sections(): + parser.remove_section(guid) + + atomic_write_conf(lp, parser) + + return True + + +def set_privileges(username, uid, gid): + """ + Set current process privileges + """ + + os.setegid(gid) + os.seteuid(uid) + + +def drop_privileges(username, func, *args): + """ + Run supplied function with privileges for specified username. + """ + current_uid = os.getuid() + + if not current_uid == 0: + raise Exception('Not enough permissions to drop privileges') + + user_uid = pwd.getpwnam(username).pw_uid + user_gid = pwd.getpwnam(username).pw_gid + + # Drop privileges + set_privileges(username, user_uid, user_gid) + + # We need to catch exception in order to be able to restore + # privileges later in this function + out = None + exc = None + try: + out = func(*args) + except Exception as e: + exc = e + + # Restore privileges + set_privileges('root', current_uid, 0) + + if exc: + raise exc + + return out + +def expand_pref_variables(text, gpt_path, lp, username=None): + utc_dt = datetime.utcnow() + dt = datetime.now() + cache_path = lp.cache_path(os.path.join('gpo_cache')) + # These are all the possible preference variables that MS supports. 
The + # variables set to 'None' here are currently unsupported by Samba, and will + # prevent the individual policy from applying. + variables = { 'AppDataDir': os.path.expanduser('~/.config'), + 'BinaryComputerSid': None, + 'BinaryUserSid': None, + 'CommonAppdataDir': None, + 'CommonDesktopDir': None, + 'CommonFavoritesDir': None, + 'CommonProgramsDir': None, + 'CommonStartUpDir': None, + 'ComputerName': lp.get('netbios name'), + 'CurrentProccessId': None, + 'CurrentThreadId': None, + 'DateTime': utc_dt.strftime('%Y-%m-%d %H:%M:%S UTC'), + 'DateTimeEx': str(utc_dt), + 'DesktopDir': os.path.expanduser('~/Desktop'), + 'DomainName': lp.get('realm'), + 'FavoritesDir': None, + 'GphPath': None, + 'GptPath': os.path.join(cache_path, + check_safe_path(gpt_path).upper()), + 'GroupPolicyVersion': None, + 'LastDriveMapped': None, + 'LastError': None, + 'LastErrorText': None, + 'LdapComputerSid': None, + 'LdapUserSid': None, + 'LocalTime': dt.strftime('%H:%M:%S'), + 'LocalTimeEx': dt.strftime('%H:%M:%S.%f'), + 'LogonDomain': lp.get('realm'), + 'LogonServer': None, + 'LogonUser': username, + 'LogonUserSid': None, + 'MacAddress': None, + 'NetPlacesDir': None, + 'OsVersion': None, + 'ProgramFilesDir': None, + 'ProgramsDir': None, + 'RecentDocumentsDir': None, + 'ResultCode': None, + 'ResultText': None, + 'ReversedComputerSid': None, + 'ReversedUserSid': None, + 'SendToDir': None, + 'StartMenuDir': None, + 'StartUpDir': None, + 'SystemDir': None, + 'SystemDrive': '/', + 'TempDir': '/tmp', + 'TimeStamp': str(datetime.timestamp(dt)), + 'TraceFile': None, + 'WindowsDir': None + } + for exp_var, val in variables.items(): + exp_var_fmt = '%%%s%%' % exp_var + if exp_var_fmt in text: + if val is None: + raise NameError('Expansion variable %s is undefined' % exp_var) + text = text.replace(exp_var_fmt, val) + return text diff --git a/python/samba/gp/util/logging.py b/python/samba/gp/util/logging.py new file mode 100644 index 0000000..da085d8 --- /dev/null +++ b/python/samba/gp/util/logging.py @@ -0,0 +1,112 @@ +# +# samba-gpupdate enhanced logging +# +# Copyright (C) 2019-2020 BaseALT Ltd. +# Copyright (C) David Mulder 2022 +# +# This program is free software; you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation; either version 3 of the License, or +# (at your option) any later version. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with this program. If not, see . 
+ +import datetime +import logging +import gettext +import random +import sys + +logger = logging.getLogger("gp") + + +def logger_init(name, log_level): + logger.addHandler(logging.StreamHandler(sys.stdout)) + logger.setLevel(logging.CRITICAL) + if log_level == 1: + logger.setLevel(logging.ERROR) + elif log_level == 2: + logger.setLevel(logging.WARNING) + elif log_level == 3: + logger.setLevel(logging.INFO) + elif log_level >= 4: + logger.setLevel(logging.DEBUG) + +class slogm(object): + """ + Structured log message class + """ + def __init__(self, message, kwargs=None): + if kwargs is None: + kwargs = {} + self.message = message + self.kwargs = kwargs + if not isinstance(self.kwargs, dict): + self.kwargs = { 'val': self.kwargs } + + def __str__(self): + now = str(datetime.datetime.now().isoformat(sep=' ', timespec='milliseconds')) + args = dict() + args.update(self.kwargs) + result = '{}|{} | {}'.format(now, self.message, args) + + return result + +def message_with_code(mtype, message): + random.seed(message) + code = random.randint(0, 99999) + return '[' + mtype + str(code).rjust(5, '0') + ']| ' + \ + gettext.gettext(message) + +class log(object): + @staticmethod + def info(message, data=None): + if data is None: + data = {} + msg = message_with_code('I', message) + logger.info(slogm(msg, data)) + return msg + + @staticmethod + def warning(message, data=None): + if data is None: + data = {} + msg = message_with_code('W', message) + logger.warning(slogm(msg, data)) + return msg + + @staticmethod + def warn(message, data=None): + if data is None: + data = {} + return log.warning(message, data) + + @staticmethod + def error(message, data=None): + if data is None: + data = {} + msg = message_with_code('E', message) + logger.error(slogm(msg, data)) + return msg + + @staticmethod + def fatal(message, data=None): + if data is None: + data = {} + msg = message_with_code('F', message) + logger.fatal(slogm(msg, data)) + return msg + + @staticmethod + def debug(message, data=None): + if data is None: + data = {} + msg = message_with_code('D', message) + logger.debug(slogm(msg, data)) + return msg diff --git a/python/samba/gp/vgp_access_ext.py b/python/samba/gp/vgp_access_ext.py new file mode 100644 index 0000000..7efb3bb --- /dev/null +++ b/python/samba/gp/vgp_access_ext.py @@ -0,0 +1,178 @@ +# vgp_access_ext samba group policy +# Copyright (C) David Mulder 2020 +# +# This program is free software; you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation; either version 3 of the License, or +# (at your option) any later version. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with this program. If not, see . + +import os, re +from samba.gp.gpclass import gp_xml_ext, gp_file_applier +from tempfile import NamedTemporaryFile +from samba.gp.util.logging import log + +intro = ''' +### autogenerated by samba +# +# This file is generated by the vgp_access_ext Group Policy +# Client Side Extension. To modify the contents of this file, +# modify the appropriate Group Policy objects which apply +# to this machine. DO NOT MODIFY THIS FILE DIRECTLY. 
+#
+
+'''
+
+# The deny-all file is implicit any time an allow entry is used
+DENY_BOUND = 9000000000
+DENY_FILE = '_gp_DENY_ALL.conf'
+
+# Each policy MUST create its own DENY_ALL file if an allow entry exists,
+# otherwise policies will conflict and one could remove a DENY_ALL when
+# another one still requires it.
+def deny_file(access):
+    deny_filename = os.path.join(access,
+                                 '%d%s' % (select_next_deny(access), DENY_FILE))
+    with NamedTemporaryFile(delete=False, dir=access) as f:
+        with open(f.name, 'w') as w:
+            w.write(intro)
+            w.write('-:ALL:ALL')
+        os.chmod(f.name, 0o644)
+        os.rename(f.name, deny_filename)
+    return deny_filename
+
+def select_next_deny(directory):
+    configs = [re.match(r'(\d+)', f) for f in os.listdir(directory)
+               if DENY_FILE in f]
+    return max([int(m.group(1)) for m in configs if m]+[DENY_BOUND])+1
+
+# Access files in /etc/security/access.d are read in the order of the system
+# locale. Here we number the conf files to ensure they are read in the correct
+# order. Ignore the deny file, since allow entries should always come before
+# the implicit deny ALL.
+def select_next_conf(directory):
+    configs = [re.match(r'(\d+)', f) for f in os.listdir(directory)
+               if DENY_FILE not in f]
+    return max([int(m.group(1)) for m in configs if m]+[0])+1
+
+class vgp_access_ext(gp_xml_ext, gp_file_applier):
+    def __str__(self):
+        return 'VGP/Unix Settings/Host Access'
+
+    def process_group_policy(self, deleted_gpo_list, changed_gpo_list,
+                             access='/etc/security/access.d'):
+        for guid, settings in deleted_gpo_list:
+            if str(self) in settings:
+                for attribute, policy_files in settings[str(self)].items():
+                    self.unapply(guid, attribute, policy_files)
+
+        for gpo in changed_gpo_list:
+            if gpo.file_sys_path:
+                allow = 'MACHINE/VGP/VTLA/VAS/HostAccessControl/Allow/manifest.xml'
+                path = os.path.join(gpo.file_sys_path, allow)
+                allow_conf = self.parse(path)
+                deny = 'MACHINE/VGP/VTLA/VAS/HostAccessControl/Deny/manifest.xml'
+                path = os.path.join(gpo.file_sys_path, deny)
+                deny_conf = self.parse(path)
+                entries = []
+                policy_files = []
+                # Initialised up front so the deny branch below cannot hit a
+                # NameError when no allow manifest exists.
+                allow_listelements = []
+                winbind_sep = self.lp.get('winbind separator')
+                if allow_conf:
+                    policy = allow_conf.find('policysetting')
+                    data = policy.find('data')
+                    allow_listelements = data.findall('listelement')
+                    for listelement in allow_listelements:
+                        adobject = listelement.find('adobject')
+                        name = adobject.find('name').text
+                        domain = adobject.find('domain').text
+                        entries.append('+:%s%s%s:ALL' % (domain,
+                                                         winbind_sep,
+                                                         name))
+                    if len(allow_listelements) > 0:
+                        log.info('Adding an implicit deny ALL because an allow'
+                                 ' entry is present')
+                        policy_files.append(deny_file(access))
+                if deny_conf:
+                    policy = deny_conf.find('policysetting')
+                    data = policy.find('data')
+                    for listelement in data.findall('listelement'):
+                        adobject = listelement.find('adobject')
+                        name = adobject.find('name').text
+                        domain = adobject.find('domain').text
+                        entries.append('-:%s%s%s:ALL' % (domain,
+                                                         winbind_sep,
+                                                         name))
+                        if len(allow_listelements) > 0:
+                            log.warn("Deny entry '%s' is meaningless with "
+                                     "allow present" % entries[-1])
+                if len(entries) == 0:
+                    continue
+                conf_id = select_next_conf(access)
+                access_file = os.path.join(access, '%010d_gp.conf' % conf_id)
+                policy_files.append(access_file)
+                access_contents = '\n'.join(entries)
+                # Each GPO applies only one set of access policies, so the
+                # attribute does not need uniqueness.
+ attribute = self.generate_attribute(gpo.name) + # The value hash is generated from the access policy, ensuring + # any changes to this GPO will cause the files to be rewritten. + value_hash = self.generate_value_hash(access_contents) + def applier_func(access, access_file, policy_files): + if not os.path.isdir(access): + os.mkdir(access, 0o644) + with NamedTemporaryFile(delete=False, dir=access) as f: + with open(f.name, 'w') as w: + w.write(intro) + w.write(access_contents) + os.chmod(f.name, 0o644) + os.rename(f.name, access_file) + return policy_files + self.apply(gpo.name, attribute, value_hash, applier_func, + access, access_file, policy_files) + # Cleanup any old entries that are no longer part of the policy + self.clean(gpo.name, keep=[attribute]) + + def rsop(self, gpo): + output = {} + if gpo.file_sys_path: + allow = 'MACHINE/VGP/VTLA/VAS/HostAccessControl/Allow/manifest.xml' + path = os.path.join(gpo.file_sys_path, allow) + allow_conf = self.parse(path) + deny = 'MACHINE/VGP/VTLA/VAS/HostAccessControl/Deny/manifest.xml' + path = os.path.join(gpo.file_sys_path, deny) + deny_conf = self.parse(path) + entries = [] + winbind_sep = self.lp.get('winbind separator') + if allow_conf: + policy = allow_conf.find('policysetting') + data = policy.find('data') + allow_listelements = data.findall('listelement') + for listelement in allow_listelements: + adobject = listelement.find('adobject') + name = adobject.find('name').text + domain = adobject.find('domain').text + if str(self) not in output.keys(): + output[str(self)] = [] + output[str(self)].append('+:%s%s%s:ALL' % (name, + winbind_sep, + domain)) + if len(allow_listelements) > 0: + output[str(self)].append('-:ALL:ALL') + if deny_conf: + policy = deny_conf.find('policysetting') + data = policy.find('data') + for listelement in data.findall('listelement'): + adobject = listelement.find('adobject') + name = adobject.find('name').text + domain = adobject.find('domain').text + if str(self) not in output.keys(): + output[str(self)] = [] + output[str(self)].append('-:%s%s%s:ALL' % (name, + winbind_sep, + domain)) + return output diff --git a/python/samba/gp/vgp_files_ext.py b/python/samba/gp/vgp_files_ext.py new file mode 100644 index 0000000..78bfc28 --- /dev/null +++ b/python/samba/gp/vgp_files_ext.py @@ -0,0 +1,140 @@ +# vgp_files_ext samba gpo policy +# Copyright (C) David Mulder 2020 +# +# This program is free software; you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation; either version 3 of the License, or +# (at your option) any later version. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with this program. If not, see . 
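+
+# A sketch of the manifest layout this extension consumes, inferred from the
+# parsing code below (all element values are hypothetical):
+#
+#   <policysetting>
+#     <data>
+#       <file_properties>
+#         <source>sshd_banner</source>
+#         <target>/etc/ssh/banner</target>
+#         <user>root</user>
+#         <group>root</group>
+#         <permissions type="user"><read/><write/></permissions>
+#       </file_properties>
+#     </data>
+#   </policysetting>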
+ +import os, pwd, grp +from samba.gp.gpclass import gp_xml_ext, check_safe_path, gp_file_applier +from tempfile import NamedTemporaryFile +from shutil import copyfile, move +from samba.gp.util.logging import log + +def calc_mode(entry): + mode = 0o000 + for permissions in entry.findall('permissions'): + ptype = permissions.get('type') + if ptype == 'user': + if permissions.find('read') is not None: + mode |= 0o400 + if permissions.find('write') is not None: + mode |= 0o200 + if permissions.find('execute') is not None: + mode |= 0o100 + elif ptype == 'group': + if permissions.find('read') is not None: + mode |= 0o040 + if permissions.find('write') is not None: + mode |= 0o020 + if permissions.find('execute') is not None: + mode |= 0o010 + elif ptype == 'other': + if permissions.find('read') is not None: + mode |= 0o004 + if permissions.find('write') is not None: + mode |= 0o002 + if permissions.find('execute') is not None: + mode |= 0o001 + return mode + +def stat_from_mode(mode): + stat = '-' + for i in range(6, -1, -3): + mask = {0o4: 'r', 0o2: 'w', 0o1: 'x'} + for x in mask.keys(): + if mode & (x << i): + stat += mask[x] + else: + stat += '-' + return stat + +def source_file_change(fname): + if os.path.exists(fname): + return b'%d' % os.stat(fname).st_ctime + +class vgp_files_ext(gp_xml_ext, gp_file_applier): + def __str__(self): + return 'VGP/Unix Settings/Files' + + def process_group_policy(self, deleted_gpo_list, changed_gpo_list): + for guid, settings in deleted_gpo_list: + if str(self) in settings: + for attribute, _ in settings[str(self)].items(): + self.unapply(guid, attribute, attribute) + + for gpo in changed_gpo_list: + if gpo.file_sys_path: + xml = 'MACHINE/VGP/VTLA/Unix/Files/manifest.xml' + path = os.path.join(gpo.file_sys_path, xml) + xml_conf = self.parse(path) + if not xml_conf: + continue + policy = xml_conf.find('policysetting') + data = policy.find('data') + for entry in data.findall('file_properties'): + local_path = self.lp.cache_path('gpo_cache') + source = entry.find('source').text + source_file = os.path.join(local_path, + os.path.dirname(check_safe_path(path)).upper(), + source.upper()) + if not os.path.exists(source_file): + log.warn('Source file does not exist', source_file) + continue + target = entry.find('target').text + user = entry.find('user').text + group = entry.find('group').text + mode = calc_mode(entry) + + # The attribute is simply the target file. + attribute = target + # The value hash is generated from the source file last + # change stamp, the user, the group, and the mode, ensuring + # any changes to this GPO will cause the file to be + # rewritten. 
+ value_hash = self.generate_value_hash( + source_file_change(source_file), + user, group, b'%d' % mode) + def applier_func(source_file, target, user, group, mode): + with NamedTemporaryFile(dir=os.path.dirname(target), + delete=False) as f: + copyfile(source_file, f.name) + os.chown(f.name, pwd.getpwnam(user).pw_uid, + grp.getgrnam(group).gr_gid) + os.chmod(f.name, mode) + move(f.name, target) + return [target] + self.apply(gpo.name, attribute, value_hash, applier_func, + source_file, target, user, group, mode) + + def rsop(self, gpo): + output = {} + xml = 'MACHINE/VGP/VTLA/Unix/Files/manifest.xml' + if gpo.file_sys_path: + path = os.path.join(gpo.file_sys_path, xml) + xml_conf = self.parse(path) + if not xml_conf: + return output + policy = xml_conf.find('policysetting') + data = policy.find('data') + for entry in data.findall('file_properties'): + source = entry.find('source').text + target = entry.find('target').text + user = entry.find('user').text + group = entry.find('group').text + mode = calc_mode(entry) + p = '%s\t%s\t%s\t%s -> %s' % \ + (stat_from_mode(mode), user, group, target, source) + if str(self) not in output.keys(): + output[str(self)] = [] + output[str(self)].append(p) + return output diff --git a/python/samba/gp/vgp_issue_ext.py b/python/samba/gp/vgp_issue_ext.py new file mode 100644 index 0000000..266e92b --- /dev/null +++ b/python/samba/gp/vgp_issue_ext.py @@ -0,0 +1,90 @@ +# vgp_issue_ext samba gpo policy +# Copyright (C) David Mulder 2020 +# +# This program is free software; you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation; either version 3 of the License, or +# (at your option) any later version. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with this program. If not, see . + +import os +from samba.gp.gpclass import gp_xml_ext, gp_misc_applier + +class vgp_issue_ext(gp_xml_ext, gp_misc_applier): + def unapply(self, guid, issue, attribute, value): + if attribute != 'issue': + raise ValueError('"%s" is not a message attribute' % attribute) + msg = value + data = self.parse_value(value) + if os.path.exists(issue): + with open(issue, 'r') as f: + current = f.read() + else: + current = '' + # Only overwrite the msg if it hasn't been modified. It may have been + # modified by another GPO. 
+ if 'new_val' not in data or current.strip() == data['new_val'].strip(): + msg = data['old_val'] + with open(issue, 'w') as w: + if msg: + w.write(msg) + else: + w.truncate() + self.cache_remove_attribute(guid, attribute) + + def apply(self, guid, issue, text): + if os.path.exists(issue): + with open(issue, 'r') as f: + current = f.read() + else: + current = '' + if current != text.text: + with open(issue, 'w') as w: + w.write(text.text) + data = self.generate_value(old_val=current, new_val=text.text) + self.cache_add_attribute(guid, 'issue', data) + + def __str__(self): + return 'Unix Settings/Issue' + + def process_group_policy(self, deleted_gpo_list, changed_gpo_list, + issue='/etc/issue'): + for guid, settings in deleted_gpo_list: + if str(self) in settings: + for attribute, msg in settings[str(self)].items(): + self.unapply(guid, issue, attribute, msg) + + for gpo in changed_gpo_list: + if gpo.file_sys_path: + xml = 'MACHINE/VGP/VTLA/Unix/Issue/manifest.xml' + path = os.path.join(gpo.file_sys_path, xml) + xml_conf = self.parse(path) + if not xml_conf: + continue + policy = xml_conf.find('policysetting') + data = policy.find('data') + text = data.find('text') + self.apply(gpo.name, issue, text) + + def rsop(self, gpo): + output = {} + if gpo.file_sys_path: + xml = 'MACHINE/VGP/VTLA/Unix/Issue/manifest.xml' + path = os.path.join(gpo.file_sys_path, xml) + xml_conf = self.parse(path) + if not xml_conf: + return output + policy = xml_conf.find('policysetting') + data = policy.find('data') + filename = data.find('filename') + text = data.find('text') + mfile = os.path.join('/etc', filename.text) + output[mfile] = text.text + return output diff --git a/python/samba/gp/vgp_motd_ext.py b/python/samba/gp/vgp_motd_ext.py new file mode 100644 index 0000000..845a5c4 --- /dev/null +++ b/python/samba/gp/vgp_motd_ext.py @@ -0,0 +1,90 @@ +# vgp_motd_ext samba gpo policy +# Copyright (C) David Mulder 2020 +# +# This program is free software; you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation; either version 3 of the License, or +# (at your option) any later version. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with this program. If not, see . + +import os +from samba.gp.gpclass import gp_xml_ext, gp_misc_applier + +class vgp_motd_ext(gp_xml_ext, gp_misc_applier): + def unapply(self, guid, motd, attribute, value): + if attribute != 'motd': + raise ValueError('"%s" is not a message attribute' % attribute) + msg = value + data = self.parse_value(value) + if os.path.exists(motd): + with open(motd, 'r') as f: + current = f.read() + else: + current = '' + # Only overwrite the msg if it hasn't been modified. It may have been + # modified by another GPO. 
+ if 'new_val' not in data or current.strip() == data['new_val'].strip(): + msg = data['old_val'] + with open(motd, 'w') as w: + if msg: + w.write(msg) + else: + w.truncate() + self.cache_remove_attribute(guid, attribute) + + def apply(self, guid, motd, text): + if os.path.exists(motd): + with open(motd, 'r') as f: + current = f.read() + else: + current = '' + if current != text.text: + with open(motd, 'w') as w: + w.write(text.text) + data = self.generate_value(old_val=current, new_val=text.text) + self.cache_add_attribute(guid, 'motd', data) + + def __str__(self): + return 'Unix Settings/Message of the Day' + + def process_group_policy(self, deleted_gpo_list, changed_gpo_list, + motd='/etc/motd'): + for guid, settings in deleted_gpo_list: + if str(self) in settings: + for attribute, msg in settings[str(self)].items(): + self.unapply(guid, motd, attribute, msg) + + for gpo in changed_gpo_list: + if gpo.file_sys_path: + xml = 'MACHINE/VGP/VTLA/Unix/MOTD/manifest.xml' + path = os.path.join(gpo.file_sys_path, xml) + xml_conf = self.parse(path) + if not xml_conf: + continue + policy = xml_conf.find('policysetting') + data = policy.find('data') + text = data.find('text') + self.apply(gpo.name, motd, text) + + def rsop(self, gpo): + output = {} + if gpo.file_sys_path: + xml = 'MACHINE/VGP/VTLA/Unix/MOTD/manifest.xml' + path = os.path.join(gpo.file_sys_path, xml) + xml_conf = self.parse(path) + if not xml_conf: + return output + policy = xml_conf.find('policysetting') + data = policy.find('data') + filename = data.find('filename') + text = data.find('text') + mfile = os.path.join('/etc', filename.text) + output[mfile] = text.text + return output diff --git a/python/samba/gp/vgp_openssh_ext.py b/python/samba/gp/vgp_openssh_ext.py new file mode 100644 index 0000000..6e0ab77 --- /dev/null +++ b/python/samba/gp/vgp_openssh_ext.py @@ -0,0 +1,115 @@ +# vgp_openssh_ext samba group policy +# Copyright (C) David Mulder 2020 +# +# This program is free software; you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation; either version 3 of the License, or +# (at your option) any later version. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with this program. If not, see . + +import os +import re +from io import BytesIO +from samba.gp.gpclass import gp_xml_ext, gp_file_applier +from samba.common import get_bytes + +intro = b''' +### autogenerated by samba +# +# This file is generated by the vgp_openssh_ext Group Policy +# Client Side Extension. To modify the contents of this file, +# modify the appropriate Group Policy objects which apply +# to this machine. DO NOT MODIFY THIS FILE DIRECTLY. +# + +''' + +# For each key value pair in sshd_config, the first obtained value will be +# used. We must insert config files in reverse, so that the last applied policy +# takes precedence. 
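+#
+# For example (hypothetical state): with an empty sshd_config.d, the first
+# policy is written to 8999999999_gp.conf and the next to 8999999998_gp.conf;
+# sshd includes the lower-numbered (newer) file first, so its values win.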
+def select_next_conf(directory): + configs = [re.match(r'(\d+)', f) for f in os.listdir(directory)] + conf_ids = [int(m.group(1)) for m in configs if m] + conf_ids.append(9000000000) # The starting node + conf_id = min(conf_ids)-1 + return os.path.join(directory, '%010d_gp.conf' % conf_id) + +class vgp_openssh_ext(gp_xml_ext, gp_file_applier): + def __str__(self): + return 'VGP/Unix Settings/OpenSSH' + + def process_group_policy(self, deleted_gpo_list, changed_gpo_list, + cfg_dir='/etc/ssh/sshd_config.d'): + for guid, settings in deleted_gpo_list: + if str(self) in settings: + for attribute, sshd_config in settings[str(self)].items(): + self.unapply(guid, attribute, sshd_config) + + for gpo in changed_gpo_list: + if gpo.file_sys_path: + xml = 'MACHINE/VGP/VTLA/SshCfg/SshD/manifest.xml' + path = os.path.join(gpo.file_sys_path, xml) + xml_conf = self.parse(path) + if not xml_conf: + continue + policy = xml_conf.find('policysetting') + data = policy.find('data') + configfile = data.find('configfile') + for configsection in configfile.findall('configsection'): + if configsection.find('sectionname').text: + continue + settings = {} + for kv in configsection.findall('keyvaluepair'): + settings[kv.find('key')] = kv.find('value') + raw = BytesIO() + for k, v in settings.items(): + raw.write(b'%s %s\n' % + (get_bytes(k.text), get_bytes(v.text))) + # Each GPO applies only one set of OpenSSH settings, in a + # single file, so the attribute does not need uniqueness. + attribute = self.generate_attribute(gpo.name) + # The value hash is generated from the raw data we will + # write to the OpenSSH settings file, ensuring any changes + # to this GPO will cause the file to be rewritten. + value_hash = self.generate_value_hash(raw.getvalue()) + if not os.path.isdir(cfg_dir): + os.mkdir(cfg_dir, 0o640) + def applier_func(cfg_dir, raw): + filename = select_next_conf(cfg_dir) + f = open(filename, 'wb') + f.write(intro) + f.write(raw.getvalue()) + os.chmod(filename, 0o640) + f.close() + return [filename] + self.apply(gpo.name, attribute, value_hash, applier_func, + cfg_dir, raw) + raw.close() + + def rsop(self, gpo): + output = {} + if gpo.file_sys_path: + xml = 'MACHINE/VGP/VTLA/SshCfg/SshD/manifest.xml' + path = os.path.join(gpo.file_sys_path, xml) + xml_conf = self.parse(path) + if not xml_conf: + return output + policy = xml_conf.find('policysetting') + data = policy.find('data') + configfile = data.find('configfile') + for configsection in configfile.findall('configsection'): + if configsection.find('sectionname').text: + continue + for kv in configsection.findall('keyvaluepair'): + if str(self) not in output.keys(): + output[str(self)] = {} + output[str(self)][kv.find('key').text] = \ + kv.find('value').text + return output diff --git a/python/samba/gp/vgp_startup_scripts_ext.py b/python/samba/gp/vgp_startup_scripts_ext.py new file mode 100644 index 0000000..c0edb16 --- /dev/null +++ b/python/samba/gp/vgp_startup_scripts_ext.py @@ -0,0 +1,136 @@ +# vgp_startup_scripts_ext samba gpo policy +# Copyright (C) David Mulder 2021 +# +# This program is free software; you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation; either version 3 of the License, or +# (at your option) any later version. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program.  If not, see <http://www.gnu.org/licenses/>.
+
+import os
+from samba.gp.gpclass import gp_xml_ext, check_safe_path, gp_file_applier
+from tempfile import NamedTemporaryFile
+from samba.common import get_bytes
+from subprocess import Popen
+
+intro = b'''
+### autogenerated by samba
+#
+# This file is generated by the vgp_startup_scripts_ext Group Policy
+# Client Side Extension. To modify the contents of this file,
+# modify the appropriate Group Policy objects which apply
+# to this machine. DO NOT MODIFY THIS FILE DIRECTLY.
+#
+
+'''
+
+class vgp_startup_scripts_ext(gp_xml_ext, gp_file_applier):
+    def __str__(self):
+        return 'VGP/Unix Settings/Startup Scripts'
+
+    def process_group_policy(self, deleted_gpo_list, changed_gpo_list,
+                             cdir='/etc/cron.d'):
+        for guid, settings in deleted_gpo_list:
+            if str(self) in settings:
+                for attribute, script in settings[str(self)].items():
+                    self.unapply(guid, attribute, script)
+
+        for gpo in changed_gpo_list:
+            if gpo.file_sys_path:
+                xml = 'MACHINE/VGP/VTLA/Unix/Scripts/Startup/manifest.xml'
+                path = os.path.join(gpo.file_sys_path, xml)
+                xml_conf = self.parse(path)
+                if not xml_conf:
+                    continue
+                policy = xml_conf.find('policysetting')
+                data = policy.find('data')
+                attributes = []
+                for listelement in data.findall('listelement'):
+                    local_path = self.lp.cache_path('gpo_cache')
+                    script = listelement.find('script').text
+                    script_file = os.path.join(local_path,
+                        os.path.dirname(check_safe_path(path)).upper(),
+                        script.upper())
+                    parameters = listelement.find('parameters')
+                    if parameters is not None:
+                        parameters = parameters.text
+                    else:
+                        parameters = ''
+                    value_hash = listelement.find('hash').text
+                    attribute = self.generate_attribute(script_file,
+                                                        parameters)
+                    attributes.append(attribute)
+                    run_as = listelement.find('run_as')
+                    if run_as is not None:
+                        run_as = run_as.text
+                    else:
+                        run_as = 'root'
+                    run_once = listelement.find('run_once') is not None
+                    if run_once:
+                        def applier_func(script_file, parameters):
+                            Popen('/bin/sh %s %s' % (script_file, parameters),
+                                  shell=True).wait()
+                            # Run-once scripts don't create a file to unapply,
+                            # so there is nothing to return.
+ return [] + self.apply(gpo.name, attribute, value_hash, applier_func, + script_file, parameters) + else: + def applier_func(run_as, script_file, parameters): + entry = '@reboot %s %s %s' % (run_as, script_file, + parameters) + with NamedTemporaryFile(prefix='gp_', dir=cdir, + delete=False) as f: + f.write(intro) + f.write(get_bytes(entry)) + os.chmod(f.name, 0o700) + return [f.name] + self.apply(gpo.name, attribute, value_hash, applier_func, + run_as, script_file, parameters) + + self.clean(gpo.name, keep=attributes) + + def rsop(self, gpo): + output = {} + xml = 'MACHINE/VGP/VTLA/Unix/Scripts/Startup/manifest.xml' + if gpo.file_sys_path: + path = os.path.join(gpo.file_sys_path, xml) + xml_conf = self.parse(path) + if not xml_conf: + return output + policy = xml_conf.find('policysetting') + data = policy.find('data') + for listelement in data.findall('listelement'): + local_path = self.lp.cache_path('gpo_cache') + script = listelement.find('script').text + script_file = os.path.join(local_path, + os.path.dirname(check_safe_path(path)).upper(), + script.upper()) + parameters = listelement.find('parameters') + if parameters is not None: + parameters = parameters.text + else: + parameters = '' + run_as = listelement.find('run_as') + if run_as is not None: + run_as = run_as.text + else: + run_as = 'root' + run_once = listelement.find('run_once') is not None + if run_once: + entry = 'Run once as: %s `%s %s`' % (run_as, script_file, + parameters) + else: + entry = '@reboot %s %s %s' % (run_as, script_file, + parameters) + if str(self) not in output.keys(): + output[str(self)] = [] + output[str(self)].append(entry) + return output diff --git a/python/samba/gp/vgp_sudoers_ext.py b/python/samba/gp/vgp_sudoers_ext.py new file mode 100644 index 0000000..b388d8b --- /dev/null +++ b/python/samba/gp/vgp_sudoers_ext.py @@ -0,0 +1,97 @@ +# vgp_sudoers_ext samba gpo policy +# Copyright (C) David Mulder 2020 +# +# This program is free software; you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation; either version 3 of the License, or +# (at your option) any later version. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with this program. If not, see . 
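+
+# A worked example of the mapping below (hypothetical manifest values): a
+# sudoers_entry with user 'root', command '/usr/bin/apt', no password
+# element, and one user principal 'jdoe' produces the sudoers line:
+#
+#   jdoe ALL=(root) NOPASSWD: /usr/bin/apt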
+ +import os +from samba.gp.gpclass import gp_xml_ext, gp_file_applier +from samba.gp.gp_sudoers_ext import sudo_applier_func + +class vgp_sudoers_ext(gp_xml_ext, gp_file_applier): + def __str__(self): + return 'VGP/Unix Settings/Sudo Rights' + + def process_group_policy(self, deleted_gpo_list, changed_gpo_list, + sdir='/etc/sudoers.d'): + for guid, settings in deleted_gpo_list: + if str(self) in settings: + for attribute, sudoers in settings[str(self)].items(): + self.unapply(guid, attribute, sudoers) + + for gpo in changed_gpo_list: + if gpo.file_sys_path: + xml = 'MACHINE/VGP/VTLA/Sudo/SudoersConfiguration/manifest.xml' + path = os.path.join(gpo.file_sys_path, xml) + xml_conf = self.parse(path) + if not xml_conf: + continue + policy = xml_conf.find('policysetting') + data = policy.find('data') + sudo_entries = [] + for entry in data.findall('sudoers_entry'): + command = entry.find('command').text + user = entry.find('user').text + listelements = entry.findall('listelement') + principals = [] + for listelement in listelements: + principals.extend(listelement.findall('principal')) + if len(principals) > 0: + uname = ','.join([u.text if u.attrib['type'] == 'user' + else '%s%%' % u.text for u in principals]) + else: + uname = 'ALL' + nopassword = entry.find('password') is None + np_entry = ' NOPASSWD:' if nopassword else '' + p = '%s ALL=(%s)%s %s' % (uname, user, np_entry, command) + sudo_entries.append(p) + # Each GPO applies only one set of sudoers, in a + # set of files, so the attribute does not need uniqueness. + attribute = self.generate_attribute(gpo.name) + # The value hash is generated from the sudo_entries, ensuring + # any changes to this GPO will cause the files to be rewritten. + value_hash = self.generate_value_hash(*sudo_entries) + self.apply(gpo.name, attribute, value_hash, sudo_applier_func, + sdir, sudo_entries) + # Cleanup any old entries that are no longer part of the policy + self.clean(gpo.name, keep=[attribute]) + + def rsop(self, gpo): + output = {} + xml = 'MACHINE/VGP/VTLA/Sudo/SudoersConfiguration/manifest.xml' + if gpo.file_sys_path: + path = os.path.join(gpo.file_sys_path, xml) + xml_conf = self.parse(path) + if not xml_conf: + return output + policy = xml_conf.find('policysetting') + data = policy.find('data') + for entry in data.findall('sudoers_entry'): + command = entry.find('command').text + user = entry.find('user').text + listelements = entry.findall('listelement') + principals = [] + for listelement in listelements: + principals.extend(listelement.findall('principal')) + if len(principals) > 0: + uname = ','.join([u.text if u.attrib['type'] == 'user' + else '%s%%' % u.text for u in principals]) + else: + uname = 'ALL' + nopassword = entry.find('password') is None + np_entry = ' NOPASSWD:' if nopassword else '' + p = '%s ALL=(%s)%s %s' % (uname, user, np_entry, command) + if str(self) not in output.keys(): + output[str(self)] = [] + output[str(self)].append(p) + return output diff --git a/python/samba/gp/vgp_symlink_ext.py b/python/samba/gp/vgp_symlink_ext.py new file mode 100644 index 0000000..4f85264 --- /dev/null +++ b/python/samba/gp/vgp_symlink_ext.py @@ -0,0 +1,76 @@ +# vgp_symlink_ext samba gpo policy +# Copyright (C) David Mulder 2020 +# +# This program is free software; you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation; either version 3 of the License, or +# (at your option) any later version. 
+# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with this program. If not, see . + +import os +from samba.gp.gpclass import gp_xml_ext, gp_file_applier +from samba.gp.util.logging import log + +class vgp_symlink_ext(gp_xml_ext, gp_file_applier): + def __str__(self): + return 'VGP/Unix Settings/Symbolic Links' + + def process_group_policy(self, deleted_gpo_list, changed_gpo_list): + for guid, settings in deleted_gpo_list: + if str(self) in settings: + for attribute, symlink in settings[str(self)].items(): + self.unapply(guid, attribute, symlink) + + for gpo in changed_gpo_list: + if gpo.file_sys_path: + xml = 'MACHINE/VGP/VTLA/Unix/Symlink/manifest.xml' + path = os.path.join(gpo.file_sys_path, xml) + xml_conf = self.parse(path) + if not xml_conf: + continue + policy = xml_conf.find('policysetting') + data = policy.find('data') + for entry in data.findall('file_properties'): + source = entry.find('source').text + target = entry.find('target').text + # We can only create a single instance of the target, so + # this becomes our unchanging attribute. + attribute = target + # The changeable part of our policy is the source (the + # thing the target points to), so our value hash is based + # on the source. + value_hash = self.generate_value_hash(source) + def applier_func(source, target): + if not os.path.exists(target): + os.symlink(source, target) + return [target] + else: + log.warn('Symlink destination exists', target) + return [] + self.apply(gpo.name, attribute, value_hash, applier_func, + source, target) + + def rsop(self, gpo): + output = {} + xml = 'MACHINE/VGP/VTLA/Unix/Symlink/manifest.xml' + if gpo.file_sys_path: + path = os.path.join(gpo.file_sys_path, xml) + xml_conf = self.parse(path) + if not xml_conf: + return output + policy = xml_conf.find('policysetting') + data = policy.find('data') + for entry in data.findall('file_properties'): + source = entry.find('source').text + target = entry.find('target').text + if str(self) not in output.keys(): + output[str(self)] = [] + output[str(self)].append('ln -s %s %s' % (source, target)) + return output diff --git a/python/samba/gp_parse/__init__.py b/python/samba/gp_parse/__init__.py new file mode 100644 index 0000000..d45b9c5 --- /dev/null +++ b/python/samba/gp_parse/__init__.py @@ -0,0 +1,185 @@ +# GPO Parser for generic extensions +# +# Copyright (C) Andrew Bartlett 2018 +# Written by Garming Sam +# +# This program is free software; you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation; either version 3 of the License, or +# (at your option) any later version. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with this program. If not, see . 
+# + +from xml.dom import minidom +from io import BytesIO +from xml.etree.ElementTree import ElementTree, fromstring, tostring +from hashlib import md5 +from samba.common import get_bytes + + +ENTITY_USER_ID = 0 +ENTITY_SDDL_ACL = 1 +ENTITY_NETWORK_PATH = 2 + + +class GPNoParserException(Exception): + pass + +class GPGeneralizeException(Exception): + pass + + +def entity_type_to_string(ent_type): + type_str = None + + if ent_type == ENTITY_USER_ID: + type_str = "USER_ID" + elif ent_type == ENTITY_SDDL_ACL: + type_str = "SDDL_ACL" + elif ent_type == ENTITY_NETWORK_PATH: + type_str = "NETWORK_PATH" + + return type_str + + +# [MS-GPIPSEC] (LDAP) +# [MS-GPDPC] Deployed Printer Connections (LDAP) +# [MS-GPPREF] Preferences Extension (XML) +# [MS-GPWL] Wireless/Wired Protocol Extension (LDAP) +class GPParser(object): + encoding = 'utf-16' + output_encoding = 'utf-8' + + def parse(self, contents): + pass + + def write_xml(self, filename): + with open(filename, 'w') as f: + f.write('') + + def load_xml(self, filename): + pass + + def write_binary(self, filename): + raise GPNoParserException("This file has no parser available.") + + def write_pretty_xml(self, xml_element, handle): + # Add the xml header as well as format it nicely. + # ElementTree doesn't have a pretty-print, so use minidom. + + et = ElementTree(xml_element) + temporary_bytes = BytesIO() + et.write(temporary_bytes, encoding=self.output_encoding, + xml_declaration=True) + minidom_parsed = minidom.parseString(temporary_bytes.getvalue()) + handle.write(minidom_parsed.toprettyxml(encoding=self.output_encoding)) + + def new_xml_entity(self, name, ent_type): + identifier = md5(get_bytes(name)).hexdigest() + + type_str = entity_type_to_string(ent_type) + + if type_str is None: + raise GPGeneralizeException("No such entity type") + + # For formatting reasons, align the length of the entities + longest = entity_type_to_string(ENTITY_NETWORK_PATH) + type_str = type_str.center(len(longest), '_') + + return "&SAMBA__{}__{}__;".format(type_str, identifier) + + def generalize_xml(self, root, out_file, global_entities): + entities = [] + + # Locate all user_id and all ACLs + user_ids = root.findall('.//*[@user_id="TRUE"]') + user_ids.sort(key = lambda x: x.tag) + + for elem in user_ids: + old_text = elem.text + if old_text is None or old_text == '': + continue + + if old_text in global_entities: + elem.text = global_entities[old_text] + entities.append((elem.text, old_text)) + else: + elem.text = self.new_xml_entity(old_text, + ENTITY_USER_ID) + + entities.append((elem.text, old_text)) + global_entities.update([(old_text, elem.text)]) + + acls = root.findall('.//*[@acl="TRUE"]') + acls.sort(key = lambda x: x.tag) + + for elem in acls: + old_text = elem.text + + if old_text is None or old_text == '': + continue + + if old_text in global_entities: + elem.text = global_entities[old_text] + entities.append((elem.text, old_text)) + else: + elem.text = self.new_xml_entity(old_text, + ENTITY_SDDL_ACL) + + entities.append((elem.text, old_text)) + global_entities.update([(old_text, elem.text)]) + + share_paths = root.findall('.//*[@network_path="TRUE"]') + share_paths.sort(key = lambda x: x.tag) + + for elem in share_paths: + old_text = elem.text + + if old_text is None or old_text == '': + continue + + stripped = old_text.lstrip('\\') + file_server = stripped.split('\\')[0] + + server_index = old_text.find(file_server) + + remaining = old_text[server_index + len(file_server):] + old_text = old_text[:server_index] + file_server + + if old_text in 
global_entities: + elem.text = global_entities[old_text] + remaining + to_put = global_entities[old_text] + entities.append((to_put, old_text)) + else: + to_put = self.new_xml_entity(old_text, + ENTITY_NETWORK_PATH) + elem.text = to_put + remaining + + entities.append((to_put, old_text)) + global_entities.update([(old_text, to_put)]) + + # Call any file specific customization of entities + # (which appear in any subclasses). + entities.extend(self.custom_entities(root, global_entities)) + + output_xml = tostring(root) + + for ent in entities: + entb = get_bytes(ent[0]) + output_xml = output_xml.replace(entb.replace(b'&', b'&amp;'), entb) + + with open(out_file, 'wb') as f: + f.write(output_xml) + + return entities + + def custom_entities(self, root, global_entities): + # Override this method to do special entity handling + return [] diff --git a/python/samba/gp_parse/gp_aas.py b/python/samba/gp_parse/gp_aas.py new file mode 100644 index 0000000..7aa19c0 --- /dev/null +++ b/python/samba/gp_parse/gp_aas.py @@ -0,0 +1,25 @@ +# GPO Parser for application advertise extensions +# +# Copyright (C) Andrew Bartlett 2018 +# Written by Garming Sam +# +# This program is free software; you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation; either version 3 of the License, or +# (at your option) any later version. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with this program. If not, see <http://www.gnu.org/licenses/>. +# + +from samba.gp_parse import GPParser + +# [MS-GPSI] Software Installation Protocol (+LDAP) +class GPAasParser(GPParser): + # TODO More work needed to deconstruct format + pass diff --git a/python/samba/gp_parse/gp_csv.py b/python/samba/gp_parse/gp_csv.py new file mode 100644 index 0000000..ebe9c4b --- /dev/null +++ b/python/samba/gp_parse/gp_csv.py @@ -0,0 +1,102 @@ +# GPO Parser for audit extensions +# +# Copyright (C) Andrew Bartlett 2018 +# Written by Garming Sam +# +# This program is free software; you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation; either version 3 of the License, or +# (at your option) any later version. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with this program. If not, see <http://www.gnu.org/licenses/>.
+# + +import codecs +import csv +import io + +from io import BytesIO +from xml.etree.ElementTree import Element, SubElement +from samba.gp_parse import GPParser +# [MS-GPAC] Group Policy Audit Configuration +class GPAuditCsvParser(GPParser): + encoding = 'utf-8' + header = None + lines = [] + + def parse(self, contents): + self.lines = [] + reader = csv.reader(codecs.getreader(self.encoding)(BytesIO(contents))) + + self.header = next(reader) + for row in reader: + line = {} + for i, x in enumerate(row): + line[self.header[i]] = x + + self.lines.append(line) + # print line + + def write_xml(self, filename): + with open(filename, 'wb') as f: + root = Element('CsvFile') + child = SubElement(root, 'Row') + for e in self.header: + value = SubElement(child, 'Value') + value.text = e + + for line in self.lines: + child = SubElement(root, 'Row') + for e, title in [(line[x], x) for x in self.header]: + value = SubElement(child, 'Value') + value.text = e + + # Metadata for generalization + if title == 'Policy Target' and e != '': + value.attrib['user_id'] = 'TRUE' + if (title == 'Setting Value' and e != '' and + (line['Subcategory'] == 'RegistryGlobalSacl' or + line['Subcategory'] == 'FileGlobalSacl')): + value.attrib['acl'] = 'TRUE' + + self.write_pretty_xml(root, f) + + + # contents = codecs.open(filename, encoding='utf-8').read() + # self.load_xml(fromstring(contents)) + + def load_xml(self, root): + header = True + self.lines = [] + + for r in root.findall('Row'): + if header: + header = False + self.header = [] + for v in r.findall('Value'): + if not isinstance(v.text, str): + v.text = v.text.decode(self.output_encoding) + self.header.append(v.text) + else: + line = {} + for i, v in enumerate(r.findall('Value')): + line[self.header[i]] = v.text if v.text is not None else '' + if not isinstance(self.header[i], str): + line[self.header[i]] = line[self.header[i]].decode(self.output_encoding) + + self.lines.append(line) + + def write_binary(self, filename): + from io import open + with open(filename, 'w', encoding=self.encoding) as f: + # In this case "binary" means "utf-8", so we let Python do that. + writer = csv.writer(f, quoting=csv.QUOTE_MINIMAL) + writer.writerow(self.header) + for line in self.lines: + writer.writerow([line[x] for x in self.header]) diff --git a/python/samba/gp_parse/gp_inf.py b/python/samba/gp_parse/gp_inf.py new file mode 100644 index 0000000..51035e6 --- /dev/null +++ b/python/samba/gp_parse/gp_inf.py @@ -0,0 +1,378 @@ +# GPO Parser for security extensions +# +# Copyright (C) Andrew Bartlett 2018 +# Written by Garming Sam +# +# This program is free software; you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation; either version 3 of the License, or +# (at your option) any later version. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with this program. If not, see . 
+# + +import codecs +import collections +import re + +from abc import ABCMeta, abstractmethod +from xml.etree.ElementTree import Element, SubElement + +from samba.gp_parse import GPParser + +# [MS-GPSB] Security Protocol Extension +class GptTmplInfParser(GPParser): + sections = None + encoding = 'utf-16' + output_encoding = 'utf-16le' + + class AbstractParam: + __metaclass__ = ABCMeta + + def __init__(self): + self.param_list = [] + + @abstractmethod + def parse(self, line): + pass + + @abstractmethod + def write_section(self, header, fp): + pass + + @abstractmethod + def build_xml(self, xml_parent): + pass + + @abstractmethod + def from_xml(self, section): + pass + + class IniParam(AbstractParam): + # param_list = [(Key, Value),] + + def parse(self, line): + key, val = line.split('=') + + self.param_list.append((key.strip(), + val.strip())) + + # print key.strip(), val.strip() + + def write_section(self, header, fp): + if len(self.param_list) == 0: + return + fp.write(u'[%s]\r\n' % header) + for key_out, val_out in self.param_list: + fp.write(u'%s = %s\r\n' % (key_out, + val_out)) + + def build_xml(self, xml_parent): + for key_ini, val_ini in self.param_list: + child = SubElement(xml_parent, 'Parameter') + key = SubElement(child, 'Key') + value = SubElement(child, 'Value') + key.text = key_ini + value.text = val_ini + + def from_xml(self, section): + for param in section.findall('Parameter'): + key = param.find('Key').text + value = param.find('Value').text + if value is None: + value = '' + + self.param_list.append((key, value)) + + class RegParam(AbstractParam): + # param_list = [Value, Value, ...] + def parse(self, line): + # = can occur in a registry key, so don't parse these + self.param_list.append(line) + # print line + + def write_section(self, header, fp): + if len(self.param_list) == 0: + return + fp.write(u'[%s]\r\n' % header) + for param in self.param_list: + fp.write(u'%s\r\n' % param) + + def build_xml(self, xml_parent): + for val_ini in self.param_list: + child = SubElement(xml_parent, 'Parameter') + value = SubElement(child, 'Value') + value.text = val_ini + + def from_xml(self, section): + for param in section.findall('Parameter'): + value = param.find('Value').text + if value is None: + value = '' + + self.param_list.append(value) + + class PrivSIDListParam(AbstractParam): + # param_list = [(Key, [SID, SID,..]), + def parse(self, line): + key, val = line.split('=') + + self.param_list.append((key.strip(), + [x.strip() for x in val.split(',')])) + # print line + + def write_section(self, header, fp): + if len(self.param_list) == 0: + return + fp.write(u'[%s]\r\n' % header) + for key_out, val in self.param_list: + val_out = u','.join(val) + fp.write(u'%s = %s\r\n' % (key_out, val_out)) + + def build_xml(self, xml_parent): + for key_ini, sid_list in self.param_list: + child = SubElement(xml_parent, 'Parameter') + key = SubElement(child, 'Key') + key.text = key_ini + for val_ini in sid_list: + value = SubElement(child, 'Value') + value.attrib['user_id'] = 'TRUE' + value.text = val_ini + + def from_xml(self, section): + for param in section.findall('Parameter'): + key = param.find('Key').text + + sid_list = [] + for val in param.findall('Value'): + value = val.text + if value is None: + value = '' + + sid_list.append(value) + + self.param_list.append((key, sid_list)) + + class NameModeACLParam(AbstractParam): + # param_list = [[Name, Mode, ACL],] + def parse(self, line): + parameters = [None, None, None] + current_arg = 0 + + while line != '': + # Read quoted string + if 
line[:1] == '"': + line = line[1:] + findex = line.find('"') + parameters[current_arg] = line[:findex] + line = line[findex + 1:] + # Skip past delimiter + elif line[:1] == ',': + line = line[1:] + current_arg += 1 + # Read unquoted string + else: + findex = line.find(',') + parameters[current_arg] = line[:findex] + line = line[findex:] + + # print parameters + # print line + self.param_list.append(parameters) + + def write_section(self, header, fp): + if len(self.param_list) == 0: + return + fp.write(u'[%s]\r\n' % header) + for param in self.param_list: + fp.write(u'"%s",%s,"%s"\r\n' % tuple(param)) + + def build_xml(self, xml_parent): + for name_mode_acl in self.param_list: + child = SubElement(xml_parent, 'Parameter') + + value = SubElement(child, 'Value') + value.text = name_mode_acl[0] + + value = SubElement(child, 'Value') + value.text = name_mode_acl[1] + + value = SubElement(child, 'Value') + value.attrib['acl'] = 'TRUE' + value.text = name_mode_acl[2] + + def from_xml(self, section): + for param in section.findall('Parameter'): + name_mode_acl = [x.text if x.text else '' for x in param.findall('Value')] + self.param_list.append(name_mode_acl) + + class MemberSIDListParam(AbstractParam): + # param_list = [([XXXX, Memberof|Members], [SID, SID...]),...] + def parse(self, line): + key, val = line.split('=') + + key = key.strip() + + self.param_list.append((key.split('__'), + [x.strip() for x in val.split(',')])) + # print line + + def write_section(self, header, fp): + if len(self.param_list) == 0: + return + fp.write(u'[%s]\r\n' % header) + + for key, val in self.param_list: + key_out = u'__'.join(key) + val_out = u','.join(val) + fp.write(u'%s = %s\r\n' % (key_out, val_out)) + + def build_xml(self, xml_parent): + for key_ini, sid_list in self.param_list: + child = SubElement(xml_parent, 'Parameter') + key = SubElement(child, 'Key') + key.text = key_ini[0] + key.attrib['member_type'] = key_ini[1] + key.attrib['user_id'] = 'TRUE' + + for val_ini in sid_list: + value = SubElement(child, 'Value') + value.attrib['user_id'] = 'TRUE' + value.text = val_ini + + def from_xml(self, section): + for param in section.findall('Parameter'): + key = param.find('Key') + member_type = key.attrib['member_type'] + + sid_list = [] + for val in param.findall('Value'): + value = val.text + if value is None: + value = '' + + sid_list.append(value) + + self.param_list.append(([key.text, member_type], sid_list)) + + class UnicodeParam(AbstractParam): + def parse(self, line): + # print line + pass + + def write_section(self, header, fp): + fp.write(u'[Unicode]\r\nUnicode=yes\r\n') + + def build_xml(self, xml_parent): + # We do not bother storing this field + pass + + def from_xml(self, section): + # We do not bother storing this field + pass + + class VersionParam(AbstractParam): + def parse(self, line): + # print line + pass + + def write_section(self, header, fp): + out = u'[Version]\r\nsignature="$CHICAGO$"\r\nRevision=1\r\n' + fp.write(out) + + def build_xml(self, xml_parent): + # We do not bother storing this field + pass + + def from_xml(self, section): + # We do not bother storing this field + pass + + def parse(self, contents): + inf_file = contents.decode(self.encoding) + + self.sections = collections.OrderedDict([ + (u'Unicode', self.UnicodeParam()), + (u'Version', self.VersionParam()), + + (u'System Access', self.IniParam()), + (u'Kerberos Policy', self.IniParam()), + (u'System Log', self.IniParam()), + (u'Security Log', self.IniParam()), + (u'Application Log', self.IniParam()), + (u'Event Audit', 
self.IniParam()), + (u'Registry Values', self.RegParam()), + (u'Privilege Rights', self.PrivSIDListParam()), + (u'Service General Setting', self.NameModeACLParam()), + (u'Registry Keys', self.NameModeACLParam()), + (u'File Security', self.NameModeACLParam()), + (u'Group Membership', self.MemberSIDListParam()), + ]) + + current_param_parser = None + current_header_name = None + + for line in inf_file.splitlines(): + match = re.match(r'\[(.*)\]', line) + if match: + header_name = match.group(1) + if header_name in self.sections: + current_param_parser = self.sections[header_name] + # print current_param_parser + continue + + # print 'using', current_param_parser + current_param_parser.parse(line) + + + def write_binary(self, filename): + with codecs.open(filename, 'wb+', + self.output_encoding) as f: + # Write the byte-order mark + f.write(u'\ufeff') + + for s in self.sections: + self.sections[s].write_section(s, f) + + def write_xml(self, filename): + with open(filename, 'wb') as f: + root = Element('GptTmplInfFile') + + for sec_inf in self.sections: + section = SubElement(root, 'Section') + section.attrib['name'] = sec_inf + + self.sections[sec_inf].build_xml(section) + + self.write_pretty_xml(root, f) + + # contents = codecs.open(filename, encoding='utf-8').read() + # self.load_xml(fromstring(contents)) + + def load_xml(self, root): + self.sections = collections.OrderedDict([ + (u'Unicode', self.UnicodeParam()), + (u'Version', self.VersionParam()), + + (u'System Access', self.IniParam()), + (u'Kerberos Policy', self.IniParam()), + (u'System Log', self.IniParam()), + (u'Security Log', self.IniParam()), + (u'Application Log', self.IniParam()), + (u'Event Audit', self.IniParam()), + (u'Registry Values', self.RegParam()), + (u'Privilege Rights', self.PrivSIDListParam()), + (u'Service General Setting', self.NameModeACLParam()), + (u'Registry Keys', self.NameModeACLParam()), + (u'File Security', self.NameModeACLParam()), + (u'Group Membership', self.MemberSIDListParam()), + ]) + + for s in root.findall('Section'): + self.sections[s.attrib['name']].from_xml(s) diff --git a/python/samba/gp_parse/gp_ini.py b/python/samba/gp_parse/gp_ini.py new file mode 100644 index 0000000..e9b7ad2 --- /dev/null +++ b/python/samba/gp_parse/gp_ini.py @@ -0,0 +1,228 @@ +# GPO Parser for extensions with ini files +# +# Copyright (C) Andrew Bartlett 2018 +# Written by Garming Sam +# +# This program is free software; you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation; either version 3 of the License, or +# (at your option) any later version. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with this program. If not, see . 
+# + +import codecs +import collections +import re + +from xml.etree.ElementTree import Element, SubElement +from configparser import ConfigParser +from io import StringIO + +from samba.gp_parse import GPParser, ENTITY_USER_ID + +# [MS-GPFR] Group Policy Folder Redirection +# [MS-GPSCR] Scripts Extension +class GPIniParser(GPParser): + ini_conf = None + + def parse(self, contents): + # Required dict_type in Python 2.7 + self.ini_conf = ConfigParser(dict_type=collections.OrderedDict, + interpolation=None) + self.ini_conf.optionxform = str + + self.ini_conf.read_file(StringIO(contents.decode(self.encoding))) + + def build_xml_parameter(self, section_xml, section, key_ini, val_ini): + child = SubElement(section_xml, 'Parameter') + key = SubElement(child, 'Key') + value = SubElement(child, 'Value') + key.text = key_ini + value.text = val_ini + + return child + + def load_xml_parameter(self, param_xml, section): + key = param_xml.find('Key').text + value = param_xml.find('Value').text + if value is None: + value = '' + self.ini_conf.set(section, key, value) + + return (key, value) + + def build_xml_section(self, root_xml, sec_ini): + section = SubElement(root_xml, 'Section') + section.attrib['name'] = sec_ini + + return section + + def load_xml_section(self, section_xml): + section_name = section_xml.attrib['name'] + self.ini_conf.add_section(section_name) + + return section_name + + def write_xml(self, filename): + with open(filename, 'wb') as f: + root = Element('IniFile') + + for sec_ini in self.ini_conf.sections(): + section = self.build_xml_section(root, sec_ini) + + for key_ini, val_ini in self.ini_conf.items(sec_ini, raw=True): + self.build_xml_parameter(section, sec_ini, key_ini, + val_ini) + + self.write_pretty_xml(root, f) + + # from xml.etree.ElementTree import fromstring + # contents = codecs.open(filename, encoding='utf-8').read() + # self.load_xml(fromstring(contents)) + + def load_xml(self, root): + # Required dict_type in Python 2.7 + self.ini_conf = ConfigParser(dict_type=collections.OrderedDict, + interpolation=None) + self.ini_conf.optionxform = str + + for s in root.findall('Section'): + section_name = self.load_xml_section(s) + + for param in s.findall('Parameter'): + self.load_xml_parameter(param, section_name) + + def write_binary(self, filename): + with codecs.open(filename, 'wb+', self.encoding) as f: + self.ini_conf.write(f) + + +class GPTIniParser(GPIniParser): + encoding = 'utf-8' + + def parse(self, contents): + try: + super().parse(contents) + except UnicodeDecodeError: + # Required dict_type in Python 2.7 + self.ini_conf = ConfigParser(dict_type=collections.OrderedDict, + interpolation=None) + self.ini_conf.optionxform = str + + # Fallback to Latin-1 which RSAT appears to use + self.ini_conf.read_file(StringIO(contents.decode('iso-8859-1'))) + + +class GPScriptsIniParser(GPIniParser): + def build_xml_parameter(self, section_xml, section, key_ini, val_ini): + parent_return = super().build_xml_parameter(section_xml, section, + key_ini, val_ini) + + cmdline = re.match('\\d+CmdLine$', key_ini) + if cmdline is not None: + value = parent_return.find('Value') + value.attrib['network_path'] = 'TRUE' + + return parent_return + + +class GPFDeploy1IniParser(GPIniParser): + def build_xml_parameter(self, section_xml, section, key_ini, val_ini): + parent_return = super().build_xml_parameter(section_xml, section, + key_ini, val_ini) + # Add generalization metadata and parse out SID list + if section.lower() == 'folder_redirection': + # Process the header section + # {GUID} = 
S-1-1-0;S-1-1-0 + + # Remove the un-split SID values + key = parent_return.find('Value') + parent_return.remove(key) + + sid_list = val_ini.strip().strip(';').split(';') + + for sid in sid_list: + value = SubElement(parent_return, 'Value') + value.text = sid + value.attrib['user_id'] = 'TRUE' + + else: + # Process redirection sections + # Only FullPath should be a network path + if key_ini == 'FullPath': + key = parent_return.find('Value') + key.attrib['network_path'] = 'TRUE' + + return parent_return + + def load_xml_parameter(self, param_xml, section): + # Re-join the SID list before entering ConfigParser + if section.lower() == 'folder_redirection': + key = param_xml.find('Key').text + values = param_xml.findall('Value') + + if len(values) == 1: + # There appears to be a convention of a trailing semi-colon + # with only one value in the SID list. + value = values[0].text + ';' + else: + value = ';'.join([x.text for x in values]) + + self.ini_conf.set(section, key, value) + + return (key, value) + + # Do the normal ini code for other sections + return super().load_xml_parameter(param_xml, section) + + def build_xml_section(self, root_xml, sec_ini): + section = SubElement(root_xml, 'Section') + + if (sec_ini.lower() != 'folder_redirection' and + sec_ini.lower() != 'version'): + guid, sid = sec_ini.split('_') + section.attrib['fdeploy_GUID'] = guid + section.attrib['fdeploy_SID'] = sid + else: + section.attrib['name'] = sec_ini + + return section + + def load_xml_section(self, section_xml): + # Construct the name from GUID + SID if no name exists + if 'name' in section_xml.attrib: + section_name = section_xml.attrib['name'] + else: + guid = section_xml.attrib['fdeploy_GUID'] + sid = section_xml.attrib['fdeploy_SID'] + section_name = guid + '_' + sid + + self.ini_conf.add_section(section_name) + return section_name + + def custom_entities(self, root, global_entities): + entities = [] + fdeploy_sids = root.findall('.//Section[@fdeploy_SID]') + fdeploy_sids.sort(key = lambda x: x.tag) + + for sid in fdeploy_sids: + old_attrib = sid.attrib['fdeploy_SID'] + + if old_attrib in global_entities: + new_attrib = global_entities[old_attrib] + else: + new_attrib = self.new_xml_entity(old_attrib, ENTITY_USER_ID) + entities.append((new_attrib, old_attrib)) + + global_entities.update([(old_attrib, new_attrib)]) + + sid.attrib['fdeploy_SID'] = new_attrib + + return entities diff --git a/python/samba/gp_parse/gp_pol.py b/python/samba/gp_parse/gp_pol.py new file mode 100644 index 0000000..1d5f348 --- /dev/null +++ b/python/samba/gp_parse/gp_pol.py @@ -0,0 +1,151 @@ +# GPO Parser for registry extension +# +# Copyright (C) Andrew Bartlett 2018 +# Written by Garming Sam +# +# This program is free software; you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation; either version 3 of the License, or +# (at your option) any later version. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with this program. If not, see . 
+# + +import base64 + +from xml.etree.ElementTree import Element, SubElement + +from samba.dcerpc import preg +from samba.dcerpc import misc +from samba.ndr import ndr_pack, ndr_unpack + +from samba.gp_parse import GPParser + +# [MS-GPREG] +# [MS-GPFAS] Firewall and Advanced Security +# [MS-GPEF] Encrypting File System +# [MS-GPNRPT] Name Resolution Table +class GPPolParser(GPParser): + pol_file = None + + reg_type = { + misc.REG_NONE: "REG_NONE", + misc.REG_SZ: "REG_SZ", + misc.REG_DWORD: "REG_DWORD", + misc.REG_DWORD_BIG_ENDIAN: "REG_DWORD_BIG_ENDIAN", + misc.REG_QWORD: "REG_QWORD", + misc.REG_EXPAND_SZ: "REG_EXPAND_SZ", + misc.REG_MULTI_SZ: "REG_MULTI_SZ", + misc.REG_BINARY: "REG_BINARY" + } + + def map_reg_type(self, val): + ret = self.reg_type.get(val) + if ret is None: + return "REG_UNKNOWN" + return ret + + def parse(self, contents): + self.pol_file = ndr_unpack(preg.file, contents) + + def load_xml(self, root): + self.pol_file = preg.file() + self.pol_file.header.signature = root.attrib['signature'] + self.pol_file.header.version = int(root.attrib['version']) + self.pol_file.num_entries = int(root.attrib['num_entries']) + + entries = [] + for e in root.findall('Entry'): + entry = preg.entry() + entry_type = int(e.attrib['type']) + + entry.type = entry_type + + entry.keyname = e.find('Key').text + value_name = e.find('ValueName').text + if value_name is None: + value_name = '' + + entry.valuename = value_name + # entry.size = int(e.attrib['size']) + + if misc.REG_MULTI_SZ == entry_type: + values = [x.text for x in e.findall('Value')] + if values == [None]: + data = u'\x00' + else: + data = u'\x00'.join(values) + u'\x00\x00' + entry.data = data.encode('utf-16le') + elif (misc.REG_NONE == entry_type): + pass + elif (misc.REG_SZ == entry_type or + misc.REG_EXPAND_SZ == entry_type): + string_val = e.find('Value').text + if string_val is None: + string_val = '' + entry.data = string_val + elif (misc.REG_DWORD == entry_type or + misc.REG_DWORD_BIG_ENDIAN == entry_type or + misc.REG_QWORD == entry_type): + entry.data = int(e.find('Value').text) + else: # REG UNKNOWN or REG_BINARY + entry.data = base64.b64decode(e.find('Value').text) + + entries.append(entry) + + self.pol_file.entries = entries + # print self.pol_file.__ndr_print__() + + def write_xml(self, filename): + with open(filename, 'wb') as f: + root = Element('PolFile') + root.attrib['num_entries'] = str(self.pol_file.num_entries) + root.attrib['signature'] = self.pol_file.header.signature + root.attrib['version'] = str(self.pol_file.header.version) + for entry in self.pol_file.entries: + child = SubElement(root, 'Entry') + # child.attrib['size'] = str(entry.size) + child.attrib['type'] = str(entry.type) + child.attrib['type_name'] = self.map_reg_type(entry.type) + key = SubElement(child, 'Key') + key.text = entry.keyname + valuename = SubElement(child, 'ValueName') + valuename.text = entry.valuename + if misc.REG_MULTI_SZ == entry.type: + multi = entry.data.decode('utf-16').rstrip(u'\x00').split(u'\x00') + # print repr(multi) + for m in multi: + value = SubElement(child, 'Value') + value.text = m + # print tostring(value) + elif (misc.REG_NONE == entry.type or + misc.REG_SZ == entry.type or + misc.REG_DWORD == entry.type or + misc.REG_DWORD_BIG_ENDIAN == entry.type or + misc.REG_QWORD == entry.type or + misc.REG_EXPAND_SZ == entry.type): + value = SubElement(child, 'Value') + value.text = str(entry.data) + # print tostring(value) + else: # REG UNKNOWN or REG_BINARY + value = SubElement(child, 'Value') + value.text = 
base64.b64encode(entry.data).decode('utf8') + # print tostring(value) + + # print tostring(root) + + self.write_pretty_xml(root, f) + + # contents = codecs.open(filename, encoding='utf-8').read() + # self.load_xml(fromstring(contents)) + + def write_binary(self, filename): + with open(filename, 'wb') as f: + binary_data = ndr_pack(self.pol_file) + f.write(binary_data) diff --git a/python/samba/graph.py b/python/samba/graph.py new file mode 100644 index 0000000..4c4a07f --- /dev/null +++ b/python/samba/graph.py @@ -0,0 +1,820 @@ +# -*- coding: utf-8 -*- +# Graph topology utilities and dot file generation +# +# Copyright (C) Andrew Bartlett 2018. +# +# Written by Douglas Bagnall +# +# This program is free software; you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation; either version 3 of the License, or +# (at your option) any later version. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with this program. If not, see . + +from samba import colour +import sys +from itertools import cycle, groupby + +FONT_SIZE = 10 + + +def reformat_graph_label(s): + """Break DNs over multiple lines, for better shaped and arguably more + readable nodes. We try to split after commas, and if necessary + after hyphens or failing that in arbitrary places.""" + if len(s) < 12: + return s + + s = s.replace(',', ',\n') + pieces = [] + for p in s.split('\n'): + while len(p) > 20: + if '-' in p[2:20]: + q, p = p.split('-', 1) + else: + n = len(p) // 12 + b = len(p) // n + q, p = p[:b], p[b:] + pieces.append(q + '-') + if p: + pieces.append(p) + + return '\\n'.join(pieces) + + +def quote_graph_label(s, reformat=False): + """Escape a string as graphvis requires.""" + # escaping inside quotes is simple in dot, because only " is escaped. + # there is no need to count backslashes in sequences like \\\\" + s = s.replace('"', '\"') + if reformat: + s = reformat_graph_label(s) + return "%s" % s + + +def shorten_vertex_names(vertices, suffix=',...', aggressive=False): + """Replace the common suffix (in practice, the base DN) of a number of + vertices with a short string (default ",..."). If this seems + pointless because the replaced string is very short or the results + seem strange, the original vertices are retained. + + :param vertices: a sequence of vertices to shorten + :param suffix: the replacement string [",..."] + :param aggressive: replace certain common non-suffix strings + + :return: tuple of (rename map, replacements) + + The rename map is a dictionary mapping the old vertex names to + their shortened versions. If no changes are made, replacements + will be empty. + """ + vmap = dict((v, v) for v in vertices) + replacements = [] + + if len(vmap) > 1: + # walk backwards along all the strings until we meet a character + # that is not shared by all. + i = -1 + vlist = list(vmap.values()) + try: + while True: + c = set(x[i] for x in vlist) + if len(c) > 1 or '*' in c: + break + i -= 1 + except IndexError: + # We have indexed beyond the start of a string, which should + # only happen if one node is a strict suffix of all others. + return vmap, replacements + + # add one to get to the last unanimous character. 
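+        # (i is a negative index, counted from the end of each string, so
+        # v[:i] below keeps everything before the shared suffix)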
+ i += 1 + + # now, we actually really want to split on a comma. So we walk + # back to a comma. + x = vlist[0] + while i < len(x) and x[i] != ',': + i += 1 + + if i >= -len(suffix): + # there is nothing to gain here + return vmap, replacements + + replacements.append((suffix, x[i:])) + + for k, v in vmap.items(): + vmap[k] = v[:i] + suffix + + if aggressive: + # Remove known common annoying strings + for v in vmap.values(): + if ',CN=Servers,' not in v: + break + else: + vmap = dict((k, v.replace(',CN=Servers,', ',**,', 1)) + for k, v in vmap.items()) + replacements.append(('**', 'CN=Servers')) + + for v in vmap.values(): + if not v.startswith('CN=NTDS Settings,'): + break + else: + vmap = dict((k, v.replace('CN=NTDS Settings,', '*,', 1)) + for k, v in vmap.items()) + replacements.append(('*', 'CN=NTDS Settings')) + + return vmap, replacements + + +def compile_graph_key(key_items, nodes_above=None, elisions=None, + prefix='key_', width=2): + """Generate a dot file snippet that acts as a legend for a graph. + + :param key_items: sequence of items (is_vertex, style, label) + :param nodes_above: list of vertices (pushes key into right position) + :param elision: tuple (short, full) indicating suffix replacement + :param prefix: string used to generate key node names ["key_"] + :param width: default width of node lines + + Each item in key_items is a tuple of (is_vertex, style, label). + is_vertex is a boolean indicating whether the item is a vertex + (True) or edge (False). Style is a dot style string for the edge + or vertex. label is the text associated with the key item. + """ + if nodes_above is None: + nodes_above = [] + edge_lines = [] + edge_names = [] + vertex_lines = [] + vertex_names = [] + order_lines = [] + for i, item in enumerate(key_items): + is_vertex, style, label = item + tag = '%s%d_' % (prefix, i) + label = quote_graph_label(label) + name = '%s_label' % tag + + if is_vertex: + order_lines.append(name) + vertex_names.append(name) + vertex_lines.append('%s[label="%s"; %s]' % + (name, label, style)) + else: + edge_names.append(name) + e1 = '%se1' % tag + e2 = '%se2' % tag + order_lines.append(name) + edge_lines.append('subgraph cluster_%s {' % tag) + edge_lines.append('%s[label=src; color="#000000"; group="%s_g"]' % + (e1, tag)) + edge_lines.append('%s[label=dest; color="#000000"; group="%s_g"]' % + (e2, tag)) + edge_lines.append('%s -> %s [constraint = false; %s]' % (e1, e2, + style)) + edge_lines.append(('%s[shape=plaintext; style=solid; width=%f; ' + 'label="%s\\r"]') % + (name, width, label)) + edge_lines.append('}') + + elision_str = '' + if elisions: + for i, elision in enumerate(reversed(elisions)): + order_lines.append('elision%d' % i) + short, long = elision + if short[0] == ',' and long[0] == ',': + short = short[1:] + long = long[1:] + elision_str += ('\nelision%d[shape=plaintext; style=solid; ' + 'label="\\“%s” means “%s”\\r"]\n' + % ((i, short, long))) + + above_lines = [] + if order_lines: + for n in nodes_above: + above_lines.append('"%s" -> %s [style=invis]' % + (n, order_lines[0])) + + s = ('subgraph cluster_key {\n' + 'label="Key";\n' + 'subgraph cluster_key_nodes {\n' + 'label="";\n' + 'color = "invis";\n' + '%s\n' + '}\n' + 'subgraph cluster_key_edges {\n' + 'label="";\n' + 'color = "invis";\n' + '%s\n' + '{%s}\n' + '}\n' + '%s\n' + '}\n' + '%s\n' + '%s [style=invis; weight=9]' + '\n' + % (';\n'.join(vertex_lines), + '\n'.join(edge_lines), + ' '.join(edge_names), + elision_str, + ';\n'.join(above_lines), + ' -> '.join(order_lines), + )) + + return s + + 
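+
+# A worked example (editor's sketch, not upstream code) of the suffix
+# elision that shorten_vertex_names() above performs:
+#
+#   vmap, subs = shorten_vertex_names(['CN=DC1,DC=samba,DC=org',
+#                                      'CN=DC2,DC=samba,DC=org'])
+#   # vmap == {'CN=DC1,DC=samba,DC=org': 'CN=DC1,...',
+#   #          'CN=DC2,DC=samba,DC=org': 'CN=DC2,...'}
+#   # subs == [(',...', ',DC=samba,DC=org')]
+#
+# dot_graph() below consumes vertex and edge lists and returns DOT
+# source that graphviz can render directly (e.g. `dot -Tpng`).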
+def dot_graph(vertices, edges, + directed=False, + title=None, + reformat_labels=True, + vertex_colors=None, + edge_colors=None, + edge_labels=None, + vertex_styles=None, + edge_styles=None, + graph_name=None, + shorten_names=False, + key_items=None, + vertex_clusters=None): + """Generate a Graphviz representation of a list of vertices and edges. + + :param vertices: list of vertex names (optional). + :param edges: list of (vertex, vertex) pairs + :param directed: bool: whether the graph is directed + :param title: optional title for the graph + :param reformat_labels: whether to wrap long vertex labels + :param vertex_colors: if not None, a sequence of colours for the vertices + :param edge_colors: if not None, colours for the edges + :param edge_labels: if not None, labels for the edges + :param vertex_styles: if not None, DOT style strings for vertices + :param edge_styles: if not None, DOT style strings for edges + :param graph_name: if not None, name of graph + :param shorten_names: if True, remove common DN suffixes + :param key: (is_vertex, style, description) tuples + :param vertex_clusters: list of subgraph cluster names + + Colour, style, and label lists must be the same length as the + corresponding list of edges or vertices (or None). + + Colours can be HTML RGB strings ("#FF0000") or common names + ("red"), or some other formats you don't want to think about. + + If `vertices` is None, only the vertices mentioned in the edges + are shown, and their appearance can be modified using the + vertex_colors and vertex_styles arguments. Vertices appearing in + the edges but not in the `vertices` list will be shown but their + styles can not be modified. + """ + out = [] + write = out.append + + if vertices is None: + vertices = set(x[0] for x in edges) | set(x[1] for x in edges) + + if shorten_names: + vlist = list(set(x[0] for x in edges) | + set(x[1] for x in edges) | + set(vertices)) + vmap, elisions = shorten_vertex_names(vlist) + vertices = [vmap[x] for x in vertices] + edges = [(vmap[a], vmap[b]) for a, b in edges] + + else: + elisions = None + + if graph_name is None: + graph_name = 'A_samba_tool_production' + + if directed: + graph_type = 'digraph' + connector = '->' + else: + graph_type = 'graph' + connector = '--' + + write('/* generated by samba */') + write('%s %s {' % (graph_type, graph_name)) + if title is not None: + write('label="%s";' % (title,)) + write('fontsize=%s;\n' % (FONT_SIZE)) + write('node[fontname=Helvetica; fontsize=%s];\n' % (FONT_SIZE)) + + prev_cluster = None + cluster_n = 0 + quoted_vertices = [] + for i, v in enumerate(vertices): + v = quote_graph_label(v, reformat_labels) + quoted_vertices.append(v) + attrs = [] + if vertex_clusters and vertex_clusters[i]: + cluster = vertex_clusters[i] + if cluster != prev_cluster: + if prev_cluster is not None: + write("}") + prev_cluster = cluster + n = quote_graph_label(cluster) + if cluster: + write('subgraph cluster_%d {' % cluster_n) + cluster_n += 1 + write('style = "rounded,dotted";') + write('node [style="filled"; fillcolor=white];') + write('label = "%s";' % n) + + if vertex_styles and vertex_styles[i]: + attrs.append(vertex_styles[i]) + if vertex_colors and vertex_colors[i]: + attrs.append('color="%s"' % quote_graph_label(vertex_colors[i])) + if attrs: + write('"%s" [%s];' % (v, ', '.join(attrs))) + else: + write('"%s";' % (v,)) + + if prev_cluster: + write("}") + + for i, edge in enumerate(edges): + a, b = edge + if a is None: + a = "Missing source value" + if b is None: + b = "Missing destination 
value" + + a = quote_graph_label(a, reformat_labels) + b = quote_graph_label(b, reformat_labels) + + attrs = [] + if edge_labels: + label = quote_graph_label(edge_labels[i]) + attrs.append('label="%s"' % label) + if edge_colors: + attrs.append('color="%s"' % quote_graph_label(edge_colors[i])) + if edge_styles: + attrs.append(edge_styles[i]) # no quoting + if attrs: + write('"%s" %s "%s" [%s];' % (a, connector, b, ', '.join(attrs))) + else: + write('"%s" %s "%s";' % (a, connector, b)) + + if key_items: + key = compile_graph_key(key_items, nodes_above=quoted_vertices, + elisions=elisions) + write(key) + + write('}\n') + return '\n'.join(out) + + +COLOUR_SETS = { + 'ansi': { + 'alternate rows': (colour.DARK_WHITE, colour.BLACK), + 'disconnected': colour.RED, + 'connected': colour.GREEN, + 'transitive': colour.DARK_YELLOW, + 'header': colour.UNDERLINE, + 'reset': colour.C_NORMAL, + }, + 'ansi-heatmap': { + 'alternate rows': (colour.DARK_WHITE, colour.BLACK), + 'disconnected': colour.REV_RED, + 'connected': colour.REV_GREEN, + 'transitive': colour.REV_DARK_YELLOW, + 'header': colour.UNDERLINE, + 'reset': colour.C_NORMAL, + }, + 'xterm-256color': { + 'alternate rows': (colour.xterm_256_colour(39), + colour.xterm_256_colour(45)), + # 'alternate rows': (colour.xterm_256_colour(246), + # colour.xterm_256_colour(247)), + 'disconnected': colour.xterm_256_colour(124, bg=True), + 'connected': colour.xterm_256_colour(112), + 'transitive': colour.xterm_256_colour(214), + 'transitive scale': (colour.xterm_256_colour(190), + colour.xterm_256_colour(184), + colour.xterm_256_colour(220), + colour.xterm_256_colour(214), + colour.xterm_256_colour(208), + ), + 'header': colour.UNDERLINE, + 'reset': colour.C_NORMAL, + }, + 'xterm-256color-heatmap': { + 'alternate rows': (colour.xterm_256_colour(171), + colour.xterm_256_colour(207)), + # 'alternate rows': (colour.xterm_256_colour(246), + # colour.xterm_256_colour(247)), + 'disconnected': colour.xterm_256_colour(124, bg=True), + 'connected': colour.xterm_256_colour(112, bg=True), + 'transitive': colour.xterm_256_colour(214, bg=True), + 'transitive scale': (colour.xterm_256_colour(190, bg=True), + colour.xterm_256_colour(184, bg=True), + colour.xterm_256_colour(220, bg=True), + colour.xterm_256_colour(214, bg=True), + colour.xterm_256_colour(208, bg=True), + ), + 'header': colour.UNDERLINE, + 'reset': colour.C_NORMAL, + }, + None: { + 'alternate rows': ('',), + 'disconnected': '', + 'connected': '', + 'transitive': '', + 'header': '', + 'reset': '', + } +} + +CHARSETS = { + 'utf8': { + 'vertical': '│', + 'horizontal': '─', + 'corner': '╭', + # 'diagonal': '╲', + 'diagonal': '·', + # 'missing': '🕱', + 'missing': '-', + 'right_arrow': '←', + }, + 'ascii': { + 'vertical': '|', + 'horizontal': '-', + 'corner': ',', + 'diagonal': '0', + 'missing': '-', + 'right_arrow': '<-', + } +} + + +def find_transitive_distance(vertices, edges): + all_vertices = (set(vertices) | + set(e[0] for e in edges) | + set(e[1] for e in edges)) + + if all_vertices != set(vertices): + print("there are unknown vertices: %s" % + (all_vertices - set(vertices)), + file=sys.stderr) + + # with n vertices, we are always less than n hops away from + # anywhere else. + inf = len(all_vertices) + distances = {} + for v in all_vertices: + distances[v] = {v: 0} + + for src, dest in edges: + distances[src][dest] = distances[src].get(dest, 1) + + # This algorithm (and implementation) seems very suboptimal. + # potentially O(n^4), though n is smallish. 
+ for i in range(inf): + changed = False + new_distances = {} + for v, d in distances.items(): + new_d = d.copy() + new_distances[v] = new_d + for dest, cost in d.items(): + for leaf, cost2 in distances[dest].items(): + new_cost = cost + cost2 + old_cost = d.get(leaf, inf) + if new_cost < old_cost: + new_d[leaf] = new_cost + changed = True + + distances = new_distances + if not changed: + break + + # filter out unwanted vertices and infinite links + answer = {} + for v in vertices: + answer[v] = {} + for v2 in vertices: + a = distances[v].get(v2, inf) + if a < inf: + answer[v][v2] = a + + return answer + + +def get_transitive_colourer(colours, n_vertices): + if 'transitive scale' in colours: + scale = colours['transitive scale'] + m = len(scale) + n = 1 + int(n_vertices ** 0.5) + + def f(link): + if not isinstance(link, int): + return '' + return scale[min(link * m // n, m - 1)] + + else: + def f(link): + return colours['transitive'] + + return f + + +def distance_matrix(vertices, edges, + utf8=False, + colour=None, + shorten_names=False, + generate_key=False, + grouping_function=None, + row_comments=None): + lines = [] + write = lines.append + + charset = CHARSETS['utf8' if utf8 else 'ascii'] + vertical = charset['vertical'] + horizontal = charset['horizontal'] + corner = charset['corner'] + diagonal = charset['diagonal'] + missing = charset['missing'] + right_arrow = charset['right_arrow'] + + colours = COLOUR_SETS[colour] + + colour_cycle = cycle(colours.get('alternate rows', ('',))) + + if vertices is None: + vertices = sorted(set(x[0] for x in edges) | set(x[1] for x in edges)) + + if grouping_function is not None: + # we sort and colour according to the grouping function + # which can be used to e.g. alternate colours by site. + vertices = sorted(vertices, key=grouping_function) + colour_list = [] + for k, v in groupby(vertices, key=grouping_function): + c = next(colour_cycle) + colour_list.extend(c for x in v) + else: + colour_list = [next(colour_cycle) for v in vertices] + + if shorten_names: + vlist = list(set(x[0] for x in edges) | + set(x[1] for x in edges) | + set(vertices)) + vmap, replacements = shorten_vertex_names(vlist, '+', + aggressive=True) + vertices = [vmap[x] for x in vertices] + edges = [(vmap[a], vmap[b]) for a, b in edges] + + vlen = max(6, max(len(v) for v in vertices)) + + # first, the key for the columns + c_header = colours.get('header', '') + c_disconn = colours.get('disconnected', '') + c_conn = colours.get('connected', '') + c_reset = colours.get('reset', '') + + colour_transitive = get_transitive_colourer(colours, len(vertices)) + + vspace = ' ' * vlen + verticals = '' + write("%*s %s %sdestination%s" % (vlen, '', + ' ' * len(vertices), + c_header, + c_reset)) + for i, v in enumerate(vertices): + j = len(vertices) - i + c = colour_list[i] + if j == 1: + start = '%s%ssource%s' % (vspace[:-6], c_header, c_reset) + else: + start = vspace + write('%s %s%s%s%s%s %s%s' % (start, + verticals, + c_reset, + c, + corner, + horizontal * j, + v, + c_reset + )) + verticals += c + vertical + + connections = find_transitive_distance(vertices, edges) + + for i, v in enumerate(vertices): + c = colour_list[i] + links = connections[v] + row = [] + for v2 in vertices: + link = links.get(v2) + if link is None: + row.append('%s%s' % (c_disconn, missing)) + continue + if link == 0: + row.append('%s%s%s%s' % (c_reset, c, diagonal, c_reset)) + elif link == 1: + row.append('%s1%s' % (c_conn, c_reset)) + else: + ct = colour_transitive(link) + if link > 9: + link = '>' + 
row.append('%s%s%s' % (ct, link, c_reset)) + + if row_comments is not None and row_comments[i]: + row.append('%s %s %s' % (c_reset, right_arrow, row_comments[i])) + + write('%s%*s%s %s%s' % (c, vlen, v, c_reset, + ''.join(row), c_reset)) + + example_c = next(colour_cycle) + if shorten_names: + write('') + for substitute, original in reversed(replacements): + write("'%s%s%s' stands for '%s%s%s'" % (example_c, + substitute, + c_reset, + example_c, + original, + c_reset)) + if generate_key: + write('') + write("Data can get from %ssource%s to %sdestination%s in the " + "indicated number of steps." % (c_header, c_reset, + c_header, c_reset)) + write("%s%s%s means zero steps (it is the same DC)" % + (example_c, diagonal, c_reset)) + write("%s1%s means a direct link" % (c_conn, c_reset)) + write("%s2%s means a transitive link involving two steps " + "(i.e. one intermediate DC)" % + (colour_transitive(2), c_reset)) + write("%s%s%s means there is no connection, even through other DCs" % + (c_disconn, missing, c_reset)) + + return '\n'.join(lines) + + +def pad_char(char, digits, padding=' '): + if digits == 1: + padding = '' + return ' ' * (digits - 1) + char + padding + + +def transpose_dict_matrix(m): + m2 = {} + for k1, row in m.items(): + for k2, dist in row.items(): + m2.setdefault(k2, {})[k1] = dist + return m2 + + +def full_matrix(rows, + utf8=False, + colour=None, + shorten_names=False, + generate_key=False, + grouping_function=None, + row_comments=None, + colour_scale=None, + digits=1, + ylabel='source', + xlabel='destination', + transpose=True): + lines = [] + write = lines.append + + if transpose: + rows = transpose_dict_matrix(rows) + + use_padding = digits > 1 + + charset = CHARSETS['utf8' if utf8 else 'ascii'] + vertical = pad_char(charset['vertical'], digits) + horizontal = charset['horizontal'] * (digits + use_padding) + corner = pad_char(charset['corner'], digits, + charset['horizontal']) + diagonal = pad_char(charset['diagonal'], digits) + missing = pad_char(charset['missing'], digits) + toobig = pad_char('>', digits) + right_arrow = charset['right_arrow'] + empty = pad_char(' ', digits) + + colours = COLOUR_SETS[colour] + + colour_cycle = cycle(colours.get('alternate rows', ('',))) + vertices = list(rows.keys()) + if grouping_function is not None: + # we sort and colour according to the grouping function + # which can be used to e.g. alternate colours by site. 
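+        # (itertools.groupby() only merges adjacent items, so sorting by
+        # the same key first is what makes this grouping correct)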
+ vertices.sort(key=grouping_function) + colour_list = [] + for k, v in groupby(vertices, key=grouping_function): + c = next(colour_cycle) + colour_list.extend(c for x in v) + else: + colour_list = [next(colour_cycle) for v in vertices] + + if shorten_names: + vmap, replacements = shorten_vertex_names(vertices, '+', + aggressive=True) + rows2 = {} + for vert, r in rows.items(): + rows2[vmap[vert]] = dict((vmap[k], v) for k, v in r.items()) + + rows = rows2 + vertices = list(rows.keys()) + + vlen = max(6, len(xlabel), max(len(v) for v in vertices)) + + # first, the key for the columns + c_header = colours.get('header', '') + c_disconn = colours.get('disconnected', '') + c_conn = colours.get('connected', '') + c_reset = colours.get('reset', '') + + if colour_scale is None: + colour_scale = len(rows) + colour_transitive = get_transitive_colourer(colours, colour_scale) + + vspace = ' ' * vlen + verticals = '' + write("%s %s %s%s%s" % (vspace, + empty * (len(rows) + 1), + c_header, + xlabel, + c_reset)) + for i, v in enumerate(vertices): + j = len(rows) - i + c = colour_list[i] + if j == 1: + start = '%s%s%s%s' % (vspace[:-len(ylabel)], + c_header, + ylabel, + c_reset) + else: + start = vspace + write('%s %s%s%s%s%s %s%s' % (start, + verticals, + c_reset, + c, + corner, + horizontal * j, + v, + c_reset + )) + verticals += '%s%s' % (c, vertical) + + end_cell = '%s%s' % (' ' * use_padding, c_reset) + overflow = False + for i, v in enumerate(vertices): + links = rows[v] + c = colour_list[i] + row = [] + for v2 in vertices: + if v2 not in links: + row.append('%s%s%s' % (c_disconn, missing, c_reset)) + elif v == v2: + row.append('%s%s%s%s' % (c_reset, c, diagonal, c_reset)) + else: + link = links[v2] + if link >= 10 ** digits: + ct = colour_transitive(link) + row.append('%s%s%s' % (ct, toobig, c_reset)) + overflow = True + continue + if link == 0: + ct = c_conn + else: + ct = colour_transitive(link) + row.append('%s%*s%s' % (ct, digits, link, end_cell)) + + if row_comments is not None and row_comments[i]: + row.append('%s %s %s' % (c_reset, right_arrow, row_comments[i])) + + write('%s%*s%s %s%s' % (c, vlen, v, c_reset, + ''.join(row), c_reset)) + + if overflow or shorten_names: + write('') + + if overflow: + write("'%s%s%s' means greater than %d " % + (colour_transitive(10 ** digits), + toobig, + c_reset, + 10 ** digits - 1)) + + if shorten_names: + example_c = next(colour_cycle) + for substitute, original in reversed(replacements): + write("'%s%s%s' stands for '%s%s%s'" % (example_c, + substitute, + c_reset, + example_c, + original, + c_reset)) + + return '\n'.join(lines) diff --git a/python/samba/hostconfig.py b/python/samba/hostconfig.py new file mode 100644 index 0000000..f3c9aad --- /dev/null +++ b/python/samba/hostconfig.py @@ -0,0 +1,81 @@ +# Unix SMB/CIFS implementation. +# Copyright (C) Jelmer Vernooij 2008 +# +# This program is free software; you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation; either version 3 of the License, or +# (at your option) any later version. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with this program. If not, see . 
+# + +"""Local host configuration.""" +from .samdb import SamDB + + +class Hostconfig(object): + """Aggregate object that contains all information about the configuration + of a Samba host.""" + + def __init__(self, lp): + self.lp = lp + + def get_shares(self): + return SharesContainer(self.lp) + + def get_samdb(self, session_info, credentials): + """Access the SamDB host. + + :param session_info: Session info to use + :param credentials: Credentials to access the SamDB with + """ + return SamDB(url=self.lp.samdb_url(), + session_info=session_info, credentials=credentials, + lp=self.lp) + + +# TODO: Rather than accessing Loadparm directly here, we should really +# have bindings to the param/shares.c and use those. + + +class SharesContainer(object): + """A shares container.""" + + def __init__(self, lp): + self._lp = lp + + def __getitem__(self, name): + if name == "global": + # [global] is not a share + raise KeyError + return Share(self._lp[name]) + + def __len__(self): + if "global" in self._lp.services(): + return len(self._lp) - 1 + return len(self._lp) + + def keys(self): + return [name for name in self._lp.services() if name != "global"] + + def __iter__(self): + return iter(self.keys()) + + +class Share(object): + """A file share.""" + + def __init__(self, service): + self._service = service + + def __getitem__(self, name): + return self._service[name] + + def __setitem__(self, name, value): + self._service[name] = value diff --git a/python/samba/idmap.py b/python/samba/idmap.py new file mode 100644 index 0000000..321ae8b --- /dev/null +++ b/python/samba/idmap.py @@ -0,0 +1,99 @@ +# Unix SMB/CIFS implementation. +# Copyright (C) 2008 Kai Blin +# +# +# This program is free software; you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation; either version 3 of the License, or +# (at your option) any later version. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with this program. If not, see . +# + +"""Convenience functions for using the idmap database.""" + +__docformat__ = "restructuredText" + +import ldb +import samba + + +class IDmapDB(samba.Ldb): + """The IDmap database.""" + + # Mappings for ID_TYPE_UID, ID_TYPE_GID and ID_TYPE_BOTH + TYPE_UID = 1 + TYPE_GID = 2 + TYPE_BOTH = 3 + + def __init__(self, url=None, lp=None, modules_dir=None, session_info=None, + credentials=None, flags=0, options=None): + """Opens the IDMap Database. 
+ + For parameter meanings, see the super class (samba.Ldb). + """ + self.lp = lp + if url is None: + url = lp.private_path("idmap.ldb") + + super().__init__(url=url, lp=lp, modules_dir=modules_dir, + session_info=session_info, credentials=credentials, flags=flags, + options=options) + + def connect(self, url=None, flags=0, options=None): + super().connect(url=self.lp.private_path(url), flags=flags, + options=options) + + def increment_xid(self): + """Increment xidNumber; if it is not present, create it and assign it the lowerBound value. + + :return: an xid that can be used for SID/unixid mapping + """ + res = self.search(expression="distinguishedName=CN=CONFIG", base="", + scope=ldb.SCOPE_SUBTREE) + id = res[0].get("xidNumber") + flag = ldb.FLAG_MOD_REPLACE + if id is None: + id = res[0].get("lowerBound") + flag = ldb.FLAG_MOD_ADD + newid = int(str(id)) + 1 + msg = ldb.Message() + msg.dn = ldb.Dn(self, "CN=CONFIG") + msg["xidNumber"] = ldb.MessageElement(str(newid), flag, "xidNumber") + self.modify(msg) + return id + + def setup_name_mapping(self, sid, type, unixid=None): + """Setup a mapping between a sam name and a unix id. + + :param sid: SID of the NT-side of the mapping. + :param type: Type of the mapping (TYPE_UID, TYPE_GID or TYPE_BOTH). + :param unixid: Unix id to map to; if none is supplied, the next available one is selected + """ + if unixid is None: + unixid = self.increment_xid() + type_string = "" + if type == self.TYPE_UID: + type_string = "ID_TYPE_UID" + elif type == self.TYPE_GID: + type_string = "ID_TYPE_GID" + elif type == self.TYPE_BOTH: + type_string = "ID_TYPE_BOTH" + else: + return + + mod = """ +dn: CN=%s +xidNumber: %s +objectSid: %s +objectClass: sidMap +type: %s +cn: %s + +""" % (sid, unixid, sid, type_string, sid) + self.add(next(self.parse_ldif(mod))[1]) diff --git a/python/samba/join.py b/python/samba/join.py new file mode 100644 index 0000000..8b7e882 --- /dev/null +++ b/python/samba/join.py @@ -0,0 +1,1786 @@ +# python join code +# Copyright Andrew Tridgell 2010 +# Copyright Andrew Bartlett 2010 +# +# This program is free software; you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation; either version 3 of the License, or +# (at your option) any later version. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with this program. If not, see <http://www.gnu.org/licenses/>.
+# + +"""Joining a domain.""" + +from samba.auth import system_session +from samba.samdb import SamDB +from samba import gensec, Ldb, drs_utils, arcfour_encrypt, string_to_byte_array +import ldb +import samba +import uuid +from samba.ndr import ndr_pack, ndr_unpack +from samba.dcerpc import security, drsuapi, misc, nbt, lsa, drsblobs, dnsserver, dnsp +from samba.credentials import Credentials, DONT_USE_KERBEROS +from samba.provision import (secretsdb_self_join, provision, provision_fill, + FILL_DRS, FILL_SUBDOMAIN, DEFAULTSITE) +from samba.provision.common import setup_path +from samba.schema import Schema +from samba import descriptor +from samba.net import Net +from samba.provision.sambadns import setup_bind9_dns +from samba import read_and_sub_file +from samba import werror +from base64 import b64encode +from samba import WERRORError, NTSTATUSError +from samba import sd_utils +from samba.dnsserver import ARecord, AAAARecord, CNAMERecord +import random +import time +import re +import os +import tempfile +from collections import OrderedDict +from samba.common import get_string +from samba.netcmd import CommandError +from samba import dsdb, functional_level + + +class DCJoinException(Exception): + + def __init__(self, msg): + super().__init__("Can't join, error: %s" % msg) + + +class DCJoinContext(object): + """Perform a DC join.""" + + def __init__(ctx, logger=None, server=None, creds=None, lp=None, site=None, + netbios_name=None, targetdir=None, domain=None, + machinepass=None, use_ntvfs=False, dns_backend=None, + promote_existing=False, plaintext_secrets=False, + backend_store=None, + backend_store_size=None, + forced_local_samdb=None): + + ctx.logger = logger + ctx.creds = creds + ctx.lp = lp + ctx.site = site + ctx.targetdir = targetdir + ctx.use_ntvfs = use_ntvfs + ctx.plaintext_secrets = plaintext_secrets + ctx.backend_store = backend_store + ctx.backend_store_size = backend_store_size + + ctx.promote_existing = promote_existing + ctx.promote_from_dn = None + + ctx.nc_list = [] + ctx.full_nc_list = [] + + ctx.creds.set_gensec_features(creds.get_gensec_features() | gensec.FEATURE_SEAL) + ctx.net = Net(creds=ctx.creds, lp=ctx.lp) + + ctx.server = server + ctx.forced_local_samdb = forced_local_samdb + + if forced_local_samdb: + ctx.samdb = forced_local_samdb + ctx.server = ctx.samdb.url + else: + if ctx.server: + # work out the DC's site (if not already specified) + if site is None: + ctx.site = ctx.find_dc_site(ctx.server) + else: + # work out the Primary DC for the domain (as well as an + # appropriate site for the new DC) + ctx.logger.info("Finding a writeable DC for domain '%s'" % domain) + ctx.server = ctx.find_dc(domain) + ctx.logger.info("Found DC %s" % ctx.server) + ctx.samdb = SamDB(url="ldap://%s" % ctx.server, + session_info=system_session(), + credentials=ctx.creds, lp=ctx.lp) + + if ctx.site is None: + ctx.site = DEFAULTSITE + + try: + ctx.samdb.search(scope=ldb.SCOPE_BASE, attrs=[]) + except ldb.LdbError as e: + (enum, estr) = e.args + raise DCJoinException(estr) + + ctx.base_dn = str(ctx.samdb.get_default_basedn()) + ctx.root_dn = str(ctx.samdb.get_root_basedn()) + ctx.schema_dn = str(ctx.samdb.get_schema_basedn()) + ctx.config_dn = str(ctx.samdb.get_config_basedn()) + ctx.domsid = security.dom_sid(ctx.samdb.get_domain_sid()) + ctx.forestsid = ctx.domsid + ctx.domain_name = ctx.get_domain_name() + ctx.forest_domain_name = ctx.get_forest_domain_name() + ctx.invocation_id = misc.GUID(str(uuid.uuid4())) + + ctx.dc_ntds_dn = ctx.samdb.get_dsServiceName() + ctx.dc_dnsHostName 
= ctx.get_dnsHostName() + ctx.behavior_version = ctx.get_behavior_version() + + if machinepass is not None: + ctx.acct_pass = machinepass + else: + ctx.acct_pass = samba.generate_random_machine_password(120, 120) + + ctx.dnsdomain = ctx.samdb.domain_dns_name() + + # the following are all dependent on the new DC's netbios_name (which + # we expect to always be specified, except when cloning a DC) + if netbios_name: + # work out the DNs of all the objects we will be adding + ctx.myname = netbios_name + ctx.samname = "%s$" % ctx.myname + ctx.server_dn = "CN=%s,CN=Servers,CN=%s,CN=Sites,%s" % (ctx.myname, ctx.site, ctx.config_dn) + ctx.ntds_dn = "CN=NTDS Settings,%s" % ctx.server_dn + ctx.acct_dn = "CN=%s,OU=Domain Controllers,%s" % (ctx.myname, ctx.base_dn) + ctx.dnshostname = "%s.%s" % (ctx.myname.lower(), ctx.dnsdomain) + ctx.dnsforest = ctx.samdb.forest_dns_name() + + topology_base = "CN=Topology,CN=Domain System Volume,CN=DFSR-GlobalSettings,CN=System,%s" % ctx.base_dn + if ctx.dn_exists(topology_base): + ctx.topology_dn = "CN=%s,%s" % (ctx.myname, topology_base) + else: + ctx.topology_dn = None + + ctx.SPNs = ["HOST/%s" % ctx.myname, + "HOST/%s" % ctx.dnshostname, + "GC/%s/%s" % (ctx.dnshostname, ctx.dnsforest)] + + res_rid_manager = ctx.samdb.search(scope=ldb.SCOPE_BASE, + attrs=["rIDManagerReference"], + base=ctx.base_dn) + + ctx.rid_manager_dn = res_rid_manager[0]["rIDManagerReference"][0] + + ctx.domaindns_zone = 'DC=DomainDnsZones,%s' % ctx.base_dn + ctx.forestdns_zone = 'DC=ForestDnsZones,%s' % ctx.root_dn + + expr = "(&(objectClass=crossRef)(ncName=%s))" % ldb.binary_encode(ctx.domaindns_zone) + res_domaindns = ctx.samdb.search(scope=ldb.SCOPE_ONELEVEL, + attrs=[], + base=ctx.samdb.get_partitions_dn(), + expression=expr) + if dns_backend is None: + ctx.dns_backend = "NONE" + else: + if len(res_domaindns) == 0: + ctx.dns_backend = "NONE" + print("NO DNS zone information found in source domain, not replicating DNS") + else: + ctx.dns_backend = dns_backend + + ctx.realm = ctx.dnsdomain + + ctx.tmp_samdb = None + + ctx.replica_flags = (drsuapi.DRSUAPI_DRS_INIT_SYNC | + drsuapi.DRSUAPI_DRS_PER_SYNC | + drsuapi.DRSUAPI_DRS_GET_ANC | + drsuapi.DRSUAPI_DRS_GET_NC_SIZE | + drsuapi.DRSUAPI_DRS_NEVER_SYNCED) + + # these elements are optional + ctx.never_reveal_sid = None + ctx.reveal_sid = None + ctx.connection_dn = None + ctx.RODC = False + ctx.krbtgt_dn = None + ctx.drsuapi = None + ctx.managedby = None + ctx.subdomain = False + ctx.adminpass = None + ctx.partition_dn = None + + ctx.dns_a_dn = None + ctx.dns_cname_dn = None + + # Do not normally register 127. 
addresses but allow override for selftest + ctx.force_all_ips = False + + def del_noerror(ctx, dn, recursive=False): + if recursive: + try: + res = ctx.samdb.search(base=dn, scope=ldb.SCOPE_ONELEVEL, attrs=["dn"]) + except Exception: + return + for r in res: + ctx.del_noerror(r.dn, recursive=True) + try: + ctx.samdb.delete(dn) + print("Deleted %s" % dn) + except Exception: + pass + + def cleanup_old_accounts(ctx, force=False): + res = ctx.samdb.search(base=ctx.samdb.get_default_basedn(), + expression='sAMAccountName=%s' % ldb.binary_encode(ctx.samname), + attrs=["msDS-krbTgtLink", "objectSID"]) + if len(res) == 0: + return + + if not force: + creds = Credentials() + creds.guess(ctx.lp) + try: + creds.set_machine_account(ctx.lp) + creds.set_kerberos_state(ctx.creds.get_kerberos_state()) + machine_samdb = SamDB(url="ldap://%s" % ctx.server, + session_info=system_session(), + credentials=creds, lp=ctx.lp) + except: + pass + else: + token_res = machine_samdb.search(scope=ldb.SCOPE_BASE, base="", attrs=["tokenGroups"]) + if token_res[0]["tokenGroups"][0] \ + == res[0]["objectSID"][0]: + raise DCJoinException("Not removing account %s which " + "looks like a Samba DC account " + "matching the password we already have. " + "To override, remove secrets.ldb and secrets.tdb" + % ctx.samname) + + ctx.del_noerror(res[0].dn, recursive=True) + + krbtgt_dn = res[0].get('msDS-KrbTgtLink', idx=0) + if krbtgt_dn is not None: + ctx.new_krbtgt_dn = krbtgt_dn + ctx.del_noerror(ctx.new_krbtgt_dn) + + res = ctx.samdb.search(base=ctx.samdb.get_default_basedn(), + expression='(&(sAMAccountName=%s)(servicePrincipalName=%s))' % + (ldb.binary_encode("dns-%s" % ctx.myname), + ldb.binary_encode("dns/%s" % ctx.dnshostname)), + attrs=[]) + if res: + ctx.del_noerror(res[0].dn, recursive=True) + + res = ctx.samdb.search(base=ctx.samdb.get_default_basedn(), + expression='(sAMAccountName=%s)' % ldb.binary_encode("dns-%s" % ctx.myname), + attrs=[]) + if res: + raise DCJoinException("Not removing account %s which looks like " + "a Samba DNS service account but does not " + "have servicePrincipalName=%s" % + (ldb.binary_encode("dns-%s" % ctx.myname), + ldb.binary_encode("dns/%s" % ctx.dnshostname))) + + def cleanup_old_join(ctx, force=False): + """Remove any DNs from a previous join.""" + # find the krbtgt link + if not ctx.subdomain: + ctx.cleanup_old_accounts(force=force) + + if ctx.connection_dn is not None: + ctx.del_noerror(ctx.connection_dn) + if ctx.krbtgt_dn is not None: + ctx.del_noerror(ctx.krbtgt_dn) + ctx.del_noerror(ctx.ntds_dn) + ctx.del_noerror(ctx.server_dn, recursive=True) + if ctx.topology_dn: + ctx.del_noerror(ctx.topology_dn) + if ctx.partition_dn: + ctx.del_noerror(ctx.partition_dn) + + if ctx.subdomain: + binding_options = "sign" + lsaconn = lsa.lsarpc("ncacn_ip_tcp:%s[%s]" % (ctx.server, binding_options), + ctx.lp, ctx.creds) + + objectAttr = lsa.ObjectAttribute() + objectAttr.sec_qos = lsa.QosInfo() + + pol_handle = lsaconn.OpenPolicy2('', + objectAttr, + security.SEC_FLAG_MAXIMUM_ALLOWED) + + name = lsa.String() + name.string = ctx.realm + info = lsaconn.QueryTrustedDomainInfoByName(pol_handle, name, lsa.LSA_TRUSTED_DOMAIN_INFO_FULL_INFO) + + lsaconn.DeleteTrustedDomain(pol_handle, info.info_ex.sid) + + name = lsa.String() + name.string = ctx.forest_domain_name + info = lsaconn.QueryTrustedDomainInfoByName(pol_handle, name, lsa.LSA_TRUSTED_DOMAIN_INFO_FULL_INFO) + + lsaconn.DeleteTrustedDomain(pol_handle, info.info_ex.sid) + + if ctx.dns_a_dn: + ctx.del_noerror(ctx.dns_a_dn) + + if ctx.dns_cname_dn: + 
ctx.del_noerror(ctx.dns_cname_dn) + + def promote_possible(ctx): + """Confirm that the account is just a bare NT4 BDC or a member server, so it can be safely promoted.""" + if ctx.subdomain: + # This shouldn't happen + raise Exception("Cannot promote into a subdomain") + + res = ctx.samdb.search(base=ctx.samdb.get_default_basedn(), + expression='sAMAccountName=%s' % ldb.binary_encode(ctx.samname), + attrs=["msDS-krbTgtLink", "userAccountControl", "serverReferenceBL", "rIDSetReferences"]) + if len(res) == 0: + raise Exception("Could not find domain member account '%s' to promote to a DC, use 'samba-tool domain join' instead" % ctx.samname) + if "msDS-KrbTgtLink" in res[0] or "serverReferenceBL" in res[0] or "rIDSetReferences" in res[0]: + raise Exception("Account '%s' appears to be an active DC, use 'samba-tool domain join' if you must re-create this account" % ctx.samname) + if (int(res[0]["userAccountControl"][0]) & (samba.dsdb.UF_WORKSTATION_TRUST_ACCOUNT | + samba.dsdb.UF_SERVER_TRUST_ACCOUNT) == 0): + raise Exception("Account %s is not a domain member or a bare NT4 BDC, use 'samba-tool domain join' instead" % ctx.samname) + + ctx.promote_from_dn = res[0].dn + + def find_dc(ctx, domain): + """find a writeable DC for the given domain""" + try: + ctx.cldap_ret = ctx.net.finddc(domain=domain, flags=nbt.NBT_SERVER_LDAP | nbt.NBT_SERVER_DS | nbt.NBT_SERVER_WRITABLE) + except NTSTATUSError as error: + raise CommandError("Failed to find a writeable DC for domain '%s': %s" % + (domain, error.args[1])) + except Exception: + raise CommandError("Failed to find a writeable DC for domain '%s'" % domain) + if ctx.cldap_ret.client_site is not None and ctx.cldap_ret.client_site != "": + ctx.site = ctx.cldap_ret.client_site + return ctx.cldap_ret.pdc_dns_name + + def find_dc_site(ctx, server): + site = None + cldap_ret = ctx.net.finddc(address=server, + flags=nbt.NBT_SERVER_LDAP | nbt.NBT_SERVER_DS) + if cldap_ret.client_site is not None and cldap_ret.client_site != "": + site = cldap_ret.client_site + return site + + def get_behavior_version(ctx): + res = ctx.samdb.search(base=ctx.base_dn, scope=ldb.SCOPE_BASE, attrs=["msDS-Behavior-Version"]) + if "msDS-Behavior-Version" in res[0]: + return int(res[0]["msDS-Behavior-Version"][0]) + else: + return samba.dsdb.DS_DOMAIN_FUNCTION_2000 + + def get_dnsHostName(ctx): + res = ctx.samdb.search(base="", scope=ldb.SCOPE_BASE, attrs=["dnsHostName"]) + return str(res[0]["dnsHostName"][0]) + + def get_domain_name(ctx): + """get netbios name of the domain from the partitions record""" + partitions_dn = ctx.samdb.get_partitions_dn() + res = ctx.samdb.search(base=partitions_dn, scope=ldb.SCOPE_ONELEVEL, attrs=["nETBIOSName"], + expression='ncName=%s' % ldb.binary_encode(str(ctx.samdb.get_default_basedn()))) + return str(res[0]["nETBIOSName"][0]) + + def get_forest_domain_name(ctx): + """get netbios name of the forest root domain from the partitions record""" + partitions_dn = ctx.samdb.get_partitions_dn() + res = ctx.samdb.search(base=partitions_dn, scope=ldb.SCOPE_ONELEVEL, attrs=["nETBIOSName"], + expression='ncName=%s' % ldb.binary_encode(str(ctx.samdb.get_root_basedn()))) + return str(res[0]["nETBIOSName"][0]) + + def get_parent_partition_dn(ctx): + """get the parent domain partition DN from parent DNS name""" + res = ctx.samdb.search(base=ctx.config_dn, attrs=[], + expression='(&(objectclass=crossRef)(dnsRoot=%s)(systemFlags:%s:=%u))' % + (ldb.binary_encode(ctx.parent_dnsdomain), + ldb.OID_COMPARATOR_AND, samba.dsdb.SYSTEM_FLAG_CR_NTDS_DOMAIN)) + return str(res[0].dn) + + def
get_mysid(ctx): + """get the SID of the connected user. Only works with w2k8 and later, + so only used for RODC join""" + res = ctx.samdb.search(base="", scope=ldb.SCOPE_BASE, attrs=["tokenGroups"]) + binsid = res[0]["tokenGroups"][0] + return get_string(ctx.samdb.schema_format_value("objectSID", binsid)) + + def dn_exists(ctx, dn): + """check if a DN exists""" + try: + res = ctx.samdb.search(base=dn, scope=ldb.SCOPE_BASE, attrs=[]) + except ldb.LdbError as e5: + (enum, estr) = e5.args + if enum == ldb.ERR_NO_SUCH_OBJECT: + return False + raise + return True + + def add_krbtgt_account(ctx): + """RODCs need a special krbtgt account""" + print("Adding %s" % ctx.krbtgt_dn) + rec = { + "dn": ctx.krbtgt_dn, + "objectclass": "user", + "useraccountcontrol": str(samba.dsdb.UF_NORMAL_ACCOUNT | + samba.dsdb.UF_ACCOUNTDISABLE), + "showinadvancedviewonly": "TRUE", + "description": "krbtgt for %s" % ctx.samname} + ctx.samdb.add(rec, ["rodc_join:1:1"]) + + # now we need to search for the samAccountName attribute on the krbtgt DN, + # as this will have been magically set to the krbtgt number + res = ctx.samdb.search(base=ctx.krbtgt_dn, scope=ldb.SCOPE_BASE, attrs=["samAccountName"]) + ctx.krbtgt_name = res[0]["samAccountName"][0] + + print("Got krbtgt_name=%s" % ctx.krbtgt_name) + + m = ldb.Message() + m.dn = ldb.Dn(ctx.samdb, ctx.acct_dn) + m["msDS-krbTgtLink"] = ldb.MessageElement(ctx.krbtgt_dn, + ldb.FLAG_MOD_REPLACE, "msDS-krbTgtLink") + ctx.samdb.modify(m) + + ctx.new_krbtgt_dn = "CN=%s,CN=Users,%s" % (ctx.krbtgt_name, ctx.base_dn) + print("Renaming %s to %s" % (ctx.krbtgt_dn, ctx.new_krbtgt_dn)) + ctx.samdb.rename(ctx.krbtgt_dn, ctx.new_krbtgt_dn) + + def drsuapi_connect(ctx): + """make a DRSUAPI connection to the naming master""" + binding_options = "seal" + if ctx.lp.log_level() >= 9: + binding_options += ",print" + binding_string = "ncacn_ip_tcp:%s[%s]" % (ctx.server, binding_options) + ctx.drsuapi = drsuapi.drsuapi(binding_string, ctx.lp, ctx.creds) + (ctx.drsuapi_handle, ctx.bind_supported_extensions) = drs_utils.drs_DsBind(ctx.drsuapi) + + def create_tmp_samdb(ctx): + """create a temporary samdb object for schema queries""" + ctx.tmp_schema = Schema(ctx.domsid, + schemadn=ctx.schema_dn) + ctx.tmp_samdb = SamDB(session_info=system_session(), url=None, auto_connect=False, + credentials=ctx.creds, lp=ctx.lp, global_schema=False, + am_rodc=False) + ctx.tmp_samdb.set_schema(ctx.tmp_schema) + + def DsAddEntry(ctx, recs): + """add a record via the DRSUAPI DsAddEntry call""" + if ctx.drsuapi is None: + ctx.drsuapi_connect() + if ctx.tmp_samdb is None: + ctx.create_tmp_samdb() + + objects = [] + for rec in recs: + id = drsuapi.DsReplicaObjectIdentifier() + id.dn = rec['dn'] + + attrs = [] + for a in rec: + if a == 'dn': + continue + if not isinstance(rec[a], list): + v = [rec[a]] + else: + v = rec[a] + v = [x.encode('utf8') if isinstance(x, str) else x for x in v] + rattr = ctx.tmp_samdb.dsdb_DsReplicaAttribute(ctx.tmp_samdb, a, v) + attrs.append(rattr) + + attribute_ctr = drsuapi.DsReplicaAttributeCtr() + attribute_ctr.num_attributes = len(attrs) + attribute_ctr.attributes = attrs + + object = drsuapi.DsReplicaObject() + object.identifier = id + object.attribute_ctr = attribute_ctr + + list_object = drsuapi.DsReplicaObjectListItem() + list_object.object = object + objects.append(list_object) + + req2 = drsuapi.DsAddEntryRequest2() + req2.first_object = objects[0] + prev = req2.first_object + for o in objects[1:]: + prev.next_object = o + prev = o + + (level, ctr) = 
ctx.drsuapi.DsAddEntry(ctx.drsuapi_handle, 2, req2) + if level == 2: + if ctr.dir_err != drsuapi.DRSUAPI_DIRERR_OK: + print("DsAddEntry failed with dir_err %u" % ctr.dir_err) + raise RuntimeError("DsAddEntry failed") + if ctr.extended_err[0] != werror.WERR_SUCCESS: + print("DsAddEntry failed with status %s info %s" % (ctr.extended_err)) + raise RuntimeError("DsAddEntry failed") + if level == 3: + if ctr.err_ver != 1: + raise RuntimeError("expected err_ver 1, got %u" % ctr.err_ver) + if ctr.err_data.status[0] != werror.WERR_SUCCESS: + if ctr.err_data.info is None: + print("DsAddEntry failed with status %s, info omitted" % (ctr.err_data.status[1])) + else: + print("DsAddEntry failed with status %s info %s" % (ctr.err_data.status[1], + ctr.err_data.info.extended_err)) + raise RuntimeError("DsAddEntry failed") + if ctr.err_data.dir_err != drsuapi.DRSUAPI_DIRERR_OK: + print("DsAddEntry failed with dir_err %u" % ctr.err_data.dir_err) + raise RuntimeError("DsAddEntry failed") + + return ctr.objects + + def join_ntdsdsa_obj(ctx): + """return the ntdsdsa object to add""" + + print("Adding %s" % ctx.ntds_dn) + + # When joining Windows, the order of certain attributes (mostly only + # msDS-HasMasterNCs and HasMasterNCs) seems to matter + rec = OrderedDict([ + ("dn", ctx.ntds_dn), + ("objectclass", "nTDSDSA"), + ("systemFlags", str(samba.dsdb.SYSTEM_FLAG_DISALLOW_MOVE_ON_DELETE)), + ("dMDLocation", ctx.schema_dn)]) + + nc_list = [ctx.base_dn, ctx.config_dn, ctx.schema_dn] + + if ctx.behavior_version >= samba.dsdb.DS_DOMAIN_FUNCTION_2003: + # This allows an override via smb.conf or --option using + # "ad dc functional level" to make us seem like 2016 to + # join such a domain for (say) a migration, or to test the + # partially implemented 2016 support. + domainControllerFunctionality = functional_level.dc_level_from_lp(ctx.lp) + rec["msDS-Behavior-Version"] = str(domainControllerFunctionality) + + if ctx.behavior_version >= samba.dsdb.DS_DOMAIN_FUNCTION_2003: + rec["msDS-HasDomainNCs"] = ctx.base_dn + + if ctx.RODC: + rec["objectCategory"] = "CN=NTDS-DSA-RO,%s" % ctx.schema_dn + rec["msDS-HasFullReplicaNCs"] = ctx.full_nc_list + rec["options"] = "37" + else: + rec["objectCategory"] = "CN=NTDS-DSA,%s" % ctx.schema_dn + + # Note that Windows seems to have an undocumented requirement that + # the msDS-HasMasterNCs attribute occurs before HasMasterNCs + if ctx.behavior_version >= samba.dsdb.DS_DOMAIN_FUNCTION_2003: + rec["msDS-HasMasterNCs"] = ctx.full_nc_list + + rec["HasMasterNCs"] = [] + for nc in nc_list: + if nc in ctx.full_nc_list: + rec["HasMasterNCs"].append(nc) + + rec["options"] = "1" + rec["invocationId"] = ndr_pack(ctx.invocation_id) + + return rec + + def join_add_ntdsdsa(ctx): + """add the ntdsdsa object""" + + rec = ctx.join_ntdsdsa_obj() + if ctx.forced_local_samdb: + ctx.samdb.add(rec, controls=["relax:0"]) + elif ctx.RODC: + ctx.samdb.add(rec, ["rodc_join:1:1"]) + else: + ctx.DsAddEntry([rec]) + + # find the GUID of our NTDS DN + res = ctx.samdb.search(base=ctx.ntds_dn, scope=ldb.SCOPE_BASE, attrs=["objectGUID"]) + ctx.ntds_guid = misc.GUID(ctx.samdb.schema_format_value("objectGUID", res[0]["objectGUID"][0])) + + def join_add_objects(ctx, specified_sid=None): + """add the various objects needed for the join""" + if ctx.acct_dn: + print("Adding %s" % ctx.acct_dn) + rec = { + "dn": ctx.acct_dn, + "objectClass": "computer", + "displayname": ctx.samname, + "samaccountname": ctx.samname, + "userAccountControl": str(ctx.userAccountControl | samba.dsdb.UF_ACCOUNTDISABLE), + "dnshostname": 
ctx.dnshostname} + if ctx.behavior_version >= samba.dsdb.DS_DOMAIN_FUNCTION_2008: + rec['msDS-SupportedEncryptionTypes'] = str(samba.dsdb.ENC_ALL_TYPES) + elif ctx.promote_existing: + rec['msDS-SupportedEncryptionTypes'] = [] + if ctx.managedby: + rec["managedby"] = ctx.managedby + elif ctx.promote_existing: + rec["managedby"] = [] + + if ctx.never_reveal_sid: + rec["msDS-NeverRevealGroup"] = ctx.never_reveal_sid + elif ctx.promote_existing: + rec["msDS-NeverRevealGroup"] = [] + + if ctx.reveal_sid: + rec["msDS-RevealOnDemandGroup"] = ctx.reveal_sid + elif ctx.promote_existing: + rec["msDS-RevealOnDemandGroup"] = [] + + if specified_sid: + rec["objectSid"] = ndr_pack(specified_sid) + + if ctx.promote_existing: + if ctx.promote_from_dn != ctx.acct_dn: + ctx.samdb.rename(ctx.promote_from_dn, ctx.acct_dn) + ctx.samdb.modify(ldb.Message.from_dict(ctx.samdb, rec, ldb.FLAG_MOD_REPLACE)) + else: + controls = None + if specified_sid is not None: + controls = ["relax:0"] + ctx.samdb.add(rec, controls=controls) + + if ctx.krbtgt_dn: + ctx.add_krbtgt_account() + + if ctx.server_dn: + print("Adding %s" % ctx.server_dn) + rec = { + "dn": ctx.server_dn, + "objectclass": "server", + # windows uses 50000000 decimal for systemFlags. A windows hex/decimal mixup bug? + "systemFlags": str(samba.dsdb.SYSTEM_FLAG_CONFIG_ALLOW_RENAME | + samba.dsdb.SYSTEM_FLAG_CONFIG_ALLOW_LIMITED_MOVE | + samba.dsdb.SYSTEM_FLAG_DISALLOW_MOVE_ON_DELETE), + # windows seems to add the dnsHostName later + "dnsHostName": ctx.dnshostname} + + if ctx.acct_dn: + rec["serverReference"] = ctx.acct_dn + + ctx.samdb.add(rec) + + if ctx.subdomain: + # the rest is done after replication + ctx.ntds_guid = None + return + + if ctx.ntds_dn: + ctx.join_add_ntdsdsa() + + # Add the Replica-Locations or RO-Replica-Locations attributes + # TODO Is this supposed to be for the schema partition too? + expr = "(&(objectClass=crossRef)(ncName=%s))" % ldb.binary_encode(ctx.domaindns_zone) + domain = (ctx.samdb.search(scope=ldb.SCOPE_ONELEVEL, + attrs=[], + base=ctx.samdb.get_partitions_dn(), + expression=expr), ctx.domaindns_zone) + + expr = "(&(objectClass=crossRef)(ncName=%s))" % ldb.binary_encode(ctx.forestdns_zone) + forest = (ctx.samdb.search(scope=ldb.SCOPE_ONELEVEL, + attrs=[], + base=ctx.samdb.get_partitions_dn(), + expression=expr), ctx.forestdns_zone) + + for part, zone in (domain, forest): + if zone not in ctx.nc_list: + continue + + if len(part) == 1: + m = ldb.Message() + m.dn = part[0].dn + attr = "msDS-NC-Replica-Locations" + if ctx.RODC: + attr = "msDS-NC-RO-Replica-Locations" + + m[attr] = ldb.MessageElement(ctx.ntds_dn, + ldb.FLAG_MOD_ADD, attr) + ctx.samdb.modify(m) + + if ctx.connection_dn is not None: + print("Adding %s" % ctx.connection_dn) + rec = { + "dn": ctx.connection_dn, + "objectclass": "nTDSConnection", + "enabledconnection": "TRUE", + "options": "65", + "fromServer": ctx.dc_ntds_dn} + ctx.samdb.add(rec) + + if ctx.acct_dn: + print("Adding SPNs to %s" % ctx.acct_dn) + m = ldb.Message() + m.dn = ldb.Dn(ctx.samdb, ctx.acct_dn) + for i in range(len(ctx.SPNs)): + ctx.SPNs[i] = ctx.SPNs[i].replace("$NTDSGUID", str(ctx.ntds_guid)) + m["servicePrincipalName"] = ldb.MessageElement(ctx.SPNs, + ldb.FLAG_MOD_REPLACE, + "servicePrincipalName") + ctx.samdb.modify(m) + + # The account password set operation should normally be done over + # LDAP. Windows 2000 DCs however allow this only with SSL + # connections which are hard to set up and otherwise refuse with + # ERR_UNWILLING_TO_PERFORM. In this case we fall back to libnet + # over SAMR. 
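# (Illustrative aside, not part of the upstream patch.) The fallback
# pattern described in the comment above, reduced to its shape:
#
#     try:
#         samdb.setpassword(search_expr, new_password,
#                           force_change_at_next_login=False)
#     except ldb.LdbError as e:
#         if e.args[0] != ldb.ERR_UNWILLING_TO_PERFORM:
#             raise
#         # LDAP refused the operation; use the SAMR-based libnet call
#         net.set_password(account_name=..., domain_name=...,
#                          newpassword=new_password)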
+ print("Setting account password for %s" % ctx.samname) + try: + ctx.samdb.setpassword("(&(objectClass=user)(sAMAccountName=%s))" + % ldb.binary_encode(ctx.samname), + ctx.acct_pass, + force_change_at_next_login=False, + username=ctx.samname) + except ldb.LdbError as e2: + (num, _) = e2.args + if num != ldb.ERR_UNWILLING_TO_PERFORM: + raise + ctx.net.set_password(account_name=ctx.samname, + domain_name=ctx.domain_name, + newpassword=ctx.acct_pass) + + res = ctx.samdb.search(base=ctx.acct_dn, scope=ldb.SCOPE_BASE, + attrs=["msDS-KeyVersionNumber", + "objectSID"]) + if "msDS-KeyVersionNumber" in res[0]: + ctx.key_version_number = int(res[0]["msDS-KeyVersionNumber"][0]) + else: + ctx.key_version_number = None + + ctx.new_dc_account_sid = ndr_unpack(security.dom_sid, + res[0]["objectSid"][0]) + + print("Enabling account") + m = ldb.Message() + m.dn = ldb.Dn(ctx.samdb, ctx.acct_dn) + m["userAccountControl"] = ldb.MessageElement(str(ctx.userAccountControl), + ldb.FLAG_MOD_REPLACE, + "userAccountControl") + ctx.samdb.modify(m) + + if ctx.dns_backend.startswith("BIND9_"): + ctx.dnspass = samba.generate_random_password(128, 255) + + recs = ctx.samdb.parse_ldif(read_and_sub_file(setup_path("provision_dns_add_samba.ldif"), + {"DNSDOMAIN": ctx.dnsdomain, + "DOMAINDN": ctx.base_dn, + "HOSTNAME": ctx.myname, + "DNSPASS_B64": b64encode(ctx.dnspass.encode('utf-16-le')).decode('utf8'), + "DNSNAME": ctx.dnshostname})) + for changetype, msg in recs: + assert changetype == ldb.CHANGETYPE_NONE + dns_acct_dn = msg["dn"] + print("Adding DNS account %s with dns/ SPN" % msg["dn"]) + + # Remove dns password (we will set it as a modify, as we can't do clearTextPassword over LDAP) + del msg["clearTextPassword"] + # Remove isCriticalSystemObject for similar reasons, it cannot be set over LDAP + del msg["isCriticalSystemObject"] + # Disable account until password is set + msg["userAccountControl"] = str(samba.dsdb.UF_NORMAL_ACCOUNT | + samba.dsdb.UF_ACCOUNTDISABLE) + try: + ctx.samdb.add(msg) + except ldb.LdbError as e: + (num, _) = e.args + if num != ldb.ERR_ENTRY_ALREADY_EXISTS: + raise + + # The account password set operation should normally be done over + # LDAP. Windows 2000 DCs however allow this only with SSL + # connections which are hard to set up and otherwise refuse with + # ERR_UNWILLING_TO_PERFORM. In this case we fall back to libnet + # over SAMR. 
+ print("Setting account password for dns-%s" % ctx.myname) + try: + ctx.samdb.setpassword("(&(objectClass=user)(samAccountName=dns-%s))" + % ldb.binary_encode(ctx.myname), + ctx.dnspass, + force_change_at_next_login=False, + username=ctx.samname) + except ldb.LdbError as e3: + (num, _) = e3.args + if num != ldb.ERR_UNWILLING_TO_PERFORM: + raise + ctx.net.set_password(account_name="dns-%s" % ctx.myname, + domain_name=ctx.domain_name, + newpassword=ctx.dnspass) + + res = ctx.samdb.search(base=dns_acct_dn, scope=ldb.SCOPE_BASE, + attrs=["msDS-KeyVersionNumber"]) + if "msDS-KeyVersionNumber" in res[0]: + ctx.dns_key_version_number = int(res[0]["msDS-KeyVersionNumber"][0]) + else: + ctx.dns_key_version_number = None + + def join_add_objects2(ctx): + """add the various objects needed for the join, for subdomains post replication""" + + print("Adding %s" % ctx.partition_dn) + name_map = {'SubdomainAdmins': "%s-%s" % (str(ctx.domsid), security.DOMAIN_RID_ADMINS)} + sd_binary = descriptor.get_paritions_crossref_subdomain_descriptor(ctx.forestsid, name_map=name_map) + rec = { + "dn": ctx.partition_dn, + "objectclass": "crossRef", + "objectCategory": "CN=Cross-Ref,%s" % ctx.schema_dn, + "nCName": ctx.base_dn, + "nETBIOSName": ctx.domain_name, + "dnsRoot": ctx.dnsdomain, + "trustParent": ctx.parent_partition_dn, + "systemFlags": str(samba.dsdb.SYSTEM_FLAG_CR_NTDS_NC |samba.dsdb.SYSTEM_FLAG_CR_NTDS_DOMAIN), + "ntSecurityDescriptor": sd_binary, + } + + if ctx.behavior_version >= samba.dsdb.DS_DOMAIN_FUNCTION_2003: + rec["msDS-Behavior-Version"] = str(ctx.behavior_version) + + rec2 = ctx.join_ntdsdsa_obj() + + objects = ctx.DsAddEntry([rec, rec2]) + if len(objects) != 2: + raise DCJoinException("Expected 2 objects from DsAddEntry") + + ctx.ntds_guid = objects[1].guid + + print("Replicating partition DN") + ctx.repl.replicate(ctx.partition_dn, + misc.GUID("00000000-0000-0000-0000-000000000000"), + ctx.ntds_guid, + exop=drsuapi.DRSUAPI_EXOP_REPL_OBJ, + replica_flags=drsuapi.DRSUAPI_DRS_WRIT_REP) + + print("Replicating NTDS DN") + ctx.repl.replicate(ctx.ntds_dn, + misc.GUID("00000000-0000-0000-0000-000000000000"), + ctx.ntds_guid, + exop=drsuapi.DRSUAPI_EXOP_REPL_OBJ, + replica_flags=drsuapi.DRSUAPI_DRS_WRIT_REP) + + def join_provision(ctx): + """Provision the local SAM.""" + + print("Calling bare provision") + + smbconf = ctx.lp.configfile + + presult = provision(ctx.logger, system_session(), smbconf=smbconf, + targetdir=ctx.targetdir, samdb_fill=FILL_DRS, realm=ctx.realm, + rootdn=ctx.root_dn, domaindn=ctx.base_dn, + schemadn=ctx.schema_dn, configdn=ctx.config_dn, + serverdn=ctx.server_dn, domain=ctx.domain_name, + hostname=ctx.myname, domainsid=ctx.domsid, + machinepass=ctx.acct_pass, serverrole="active directory domain controller", + sitename=ctx.site, lp=ctx.lp, ntdsguid=ctx.ntds_guid, + use_ntvfs=ctx.use_ntvfs, dns_backend=ctx.dns_backend, + plaintext_secrets=ctx.plaintext_secrets, + backend_store=ctx.backend_store, + backend_store_size=ctx.backend_store_size, + batch_mode=True) + print("Provision OK for domain DN %s" % presult.domaindn) + ctx.local_samdb = presult.samdb + ctx.lp = presult.lp + ctx.paths = presult.paths + ctx.names = presult.names + + # Fix up the forestsid, it may be different if we are joining as a subdomain + ctx.names.forestsid = ctx.forestsid + + def join_provision_own_domain(ctx): + """Provision the local SAM.""" + + # we now operate exclusively on the local database, which + # we need to reopen in order to get the newly created schema + # we set the 
transaction_index_cache_size to 200,000 to ensure it is + not too small; if it is too small, the performance of the join will + be negatively impacted. + print("Reconnecting to local samdb") + ctx.samdb = SamDB(url=ctx.local_samdb.url, + options=[ + "transaction_index_cache_size:200000"], + session_info=system_session(), + lp=ctx.local_samdb.lp, + global_schema=False) + ctx.samdb.set_invocation_id(str(ctx.invocation_id)) + ctx.local_samdb = ctx.samdb + + ctx.logger.info("Finding domain GUID from ncName") + res = ctx.local_samdb.search(base=ctx.partition_dn, scope=ldb.SCOPE_BASE, attrs=['ncName'], + controls=["extended_dn:1:1", "reveal_internals:0"]) + + if 'nCName' not in res[0]: + raise DCJoinException("Can't find naming context on partition DN %s in %s" % (ctx.partition_dn, ctx.samdb.url)) + + try: + ctx.names.domainguid = str(misc.GUID(ldb.Dn(ctx.samdb, res[0]['ncName'][0].decode('utf8')).get_extended_component('GUID'))) + except KeyError: + raise DCJoinException("Can't find GUID in naming master on partition DN %s" % res[0]['ncName'][0]) + + ctx.logger.info("Got domain GUID %s" % ctx.names.domainguid) + + ctx.logger.info("Calling own domain provision") + + secrets_ldb = Ldb(ctx.paths.secrets, session_info=system_session(), lp=ctx.lp) + + provision_fill(ctx.local_samdb, secrets_ldb, + ctx.logger, ctx.names, ctx.paths, + dom_for_fun_level=ctx.behavior_version, + samdb_fill=FILL_SUBDOMAIN, + machinepass=ctx.acct_pass, serverrole="active directory domain controller", + lp=ctx.lp, hostip=ctx.names.hostip, hostip6=ctx.names.hostip6, + dns_backend=ctx.dns_backend, adminpass=ctx.adminpass) + + if ctx.behavior_version >= samba.dsdb.DS_DOMAIN_FUNCTION_2012: + adprep_level = ctx.behavior_version + + updates_allowed_overridden = False + if ctx.lp.get("dsdb:schema update allowed") is None: + ctx.lp.set("dsdb:schema update allowed", "yes") + print("Temporarily overriding 'dsdb:schema update allowed' setting") + updates_allowed_overridden = True + + ctx.samdb.transaction_start() + try: + from samba.domain_update import DomainUpdate + + domain = DomainUpdate(ctx.local_samdb, fix=True) + domain.check_updates_functional_level(adprep_level, + samba.dsdb.DS_DOMAIN_FUNCTION_2008, + update_revision=True) + + ctx.samdb.transaction_commit() + except Exception as e: + ctx.samdb.transaction_cancel() + raise DCJoinException("DomainUpdate() failed: %s" % e) + + if updates_allowed_overridden: + ctx.lp.set("dsdb:schema update allowed", "no") + + print("Provision OK for domain %s" % ctx.names.dnsdomain) + + def create_replicator(ctx, repl_creds, binding_options): + """Creates a new DRS object for managing replications""" + return drs_utils.drs_Replicate( + "ncacn_ip_tcp:%s[%s]" % (ctx.server, binding_options), + ctx.lp, repl_creds, ctx.local_samdb, ctx.invocation_id) + + def join_replicate(ctx): + """Replicate the SAM.""" + + ctx.logger.info("Starting replication") + + # A global transaction is started so that linked attributes + # are applied at the very end, once all partitions are + # replicated. This helps get all cross-partition links.
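# (Illustrative aside, not part of the upstream patch.) The
# transaction bracket used below, reduced to its shape:
#
#     local_samdb.transaction_start()
#     try:
#         ...replicate schema, config, domain and DNS partitions...
#     except:
#         local_samdb.transaction_cancel()
#         raise
#     else:
#         local_samdb.transaction_commit()  # linked attributes land here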
+ ctx.local_samdb.transaction_start() + try: + source_dsa_invocation_id = misc.GUID(ctx.samdb.get_invocation_id()) + if ctx.ntds_guid is None: + print("Using DS_BIND_GUID_W2K3") + destination_dsa_guid = misc.GUID(drsuapi.DRSUAPI_DS_BIND_GUID_W2K3) + else: + destination_dsa_guid = ctx.ntds_guid + + if ctx.RODC: + repl_creds = Credentials() + repl_creds.guess(ctx.lp) + repl_creds.set_kerberos_state(DONT_USE_KERBEROS) + repl_creds.set_username(ctx.samname) + repl_creds.set_password(ctx.acct_pass) + else: + repl_creds = ctx.creds + + binding_options = "seal" + if ctx.lp.log_level() >= 9: + binding_options += ",print" + + repl = ctx.create_replicator(repl_creds, binding_options) + + repl.replicate(ctx.schema_dn, source_dsa_invocation_id, + destination_dsa_guid, schema=True, rodc=ctx.RODC, + replica_flags=ctx.replica_flags) + repl.replicate(ctx.config_dn, source_dsa_invocation_id, + destination_dsa_guid, rodc=ctx.RODC, + replica_flags=ctx.replica_flags) + if not ctx.subdomain: + # Replicate first the critical objects for the basedn + + # We do this to match Windows. The default case is to + # do a critical objects replication, then a second + # with all objects. + + print("Replicating critical objects from the base DN of the domain") + try: + repl.replicate(ctx.base_dn, source_dsa_invocation_id, + destination_dsa_guid, rodc=ctx.RODC, + replica_flags=ctx.domain_replica_flags | drsuapi.DRSUAPI_DRS_CRITICAL_ONLY) + except WERRORError as e: + + if e.args[0] == werror.WERR_DS_DRA_MISSING_PARENT: + ctx.logger.warning("First pass of replication with " + "DRSUAPI_DRS_CRITICAL_ONLY " + "not possible due to a missing parent object. " + "This is typical of a Samba " + "4.5 or earlier server. " + "We will replicate all the objects instead.") + else: + raise + + # Now replicate all the objects in the domain (unless + # we were run with --critical-only). + # + # Doing the replication of users as a second pass + # matches more closely the Windows behaviour, which is + # actually to do this on first startup. + # + # Use --critical-only if you want that (but you don't + # really, it is better to see any errors here). + if not ctx.domain_replica_flags & drsuapi.DRSUAPI_DRS_CRITICAL_ONLY: + try: + repl.replicate(ctx.base_dn, source_dsa_invocation_id, + destination_dsa_guid, rodc=ctx.RODC, + replica_flags=ctx.domain_replica_flags) + except WERRORError as e: + + if e.args[0] == werror.WERR_DS_DRA_MISSING_PARENT and \ + ctx.domain_replica_flags & drsuapi.DRSUAPI_DRS_CRITICAL_ONLY: + ctx.logger.warning("Replication with DRSUAPI_DRS_CRITICAL_ONLY " + "failed due to a missing parent object. " + "This may be a Samba 4.5 or earlier server " + "and is not compatible with --critical-only") + raise + + print("Done with always replicated NC (base, config, schema)") + + # At this point we should already have an entry in the ForestDNS + # and DomainDNS NC (those under CN=Partitions,DC=...) in order to + # indicate that we hold a replica for this NC. 
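# (Illustrative aside, not part of the upstream patch.) The "entry"
# referred to above is the msDS-NC-Replica-Locations value (or
# msDS-NC-RO-Replica-Locations on an RODC) that join_add_objects()
# added to the partition's crossRef object; conceptually:
#
#     dn: <crossRef of DC=DomainDnsZones,...>   (hypothetical DN)
#     changetype: modify
#     add: msDS-NC-Replica-Locations
#     msDS-NC-Replica-Locations: CN=NTDS Settings,CN=<new DC>,...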
+ for nc in (ctx.domaindns_zone, ctx.forestdns_zone): + if nc in ctx.nc_list: + print("Replicating %s" % (str(nc))) + repl.replicate(nc, source_dsa_invocation_id, + destination_dsa_guid, rodc=ctx.RODC, + replica_flags=ctx.replica_flags) + + if ctx.RODC: + repl.replicate(ctx.acct_dn, source_dsa_invocation_id, + destination_dsa_guid, + exop=drsuapi.DRSUAPI_EXOP_REPL_SECRET, rodc=True) + repl.replicate(ctx.new_krbtgt_dn, source_dsa_invocation_id, + destination_dsa_guid, + exop=drsuapi.DRSUAPI_EXOP_REPL_SECRET, rodc=True) + elif ctx.rid_manager_dn is not None: + # Try and get a RID Set if we can. This is only possible against the RID Master. Warn otherwise. + try: + repl.replicate(ctx.rid_manager_dn, source_dsa_invocation_id, + destination_dsa_guid, + exop=drsuapi.DRSUAPI_EXOP_FSMO_RID_ALLOC) + except samba.DsExtendedError as e1: + (enum, estr) = e1.args + if enum == drsuapi.DRSUAPI_EXOP_ERR_FSMO_NOT_OWNER: + print("WARNING: Unable to replicate own RID Set, as server %s (the server we joined) is not the RID Master." % ctx.server) + print("NOTE: This is normal and expected, Samba will be able to create users after it contacts the RID Master at first startup.") + else: + raise + + ctx.repl = repl + ctx.source_dsa_invocation_id = source_dsa_invocation_id + ctx.destination_dsa_guid = destination_dsa_guid + + ctx.logger.info("Committing SAM database - this may take some time") + except: + ctx.local_samdb.transaction_cancel() + raise + else: + + # This is a special case, we have completed a full + # replication so if a link comes to us that points to a + # deleted object, and we asked for all objects already, we + # just have to ignore it, the chance to re-try the + # replication with GET_TGT has long gone. This can happen + # if the object is deleted and sent to us after the link + # was sent, as we are processing all links in the + # transaction_commit(). + if not ctx.domain_replica_flags & drsuapi.DRSUAPI_DRS_CRITICAL_ONLY: + ctx.local_samdb.set_opaque_integer(dsdb.DSDB_FULL_JOIN_REPLICATION_COMPLETED_OPAQUE_NAME, + 1) + ctx.local_samdb.transaction_commit() + ctx.local_samdb.set_opaque_integer(dsdb.DSDB_FULL_JOIN_REPLICATION_COMPLETED_OPAQUE_NAME, + 0) + ctx.logger.info("Committed SAM database") + + # A large replication may have caused our LDB connection to the + # remote DC to timeout, so check the connection is still alive + ctx.refresh_ldb_connection() + + def refresh_ldb_connection(ctx): + try: + # query the rootDSE to check the connection + ctx.samdb.search(scope=ldb.SCOPE_BASE, attrs=[]) + except ldb.LdbError as e: + (enum, estr) = e.args + + # if the connection was disconnected, then reconnect + if (enum == ldb.ERR_OPERATIONS_ERROR and + ('NT_STATUS_CONNECTION_DISCONNECTED' in estr or + 'NT_STATUS_CONNECTION_RESET' in estr)): + ctx.logger.warning("LDB connection disconnected. 
Reconnecting") + ctx.samdb = SamDB(url="ldap://%s" % ctx.server, + session_info=system_session(), + credentials=ctx.creds, lp=ctx.lp) + else: + raise DCJoinException(estr) + + def send_DsReplicaUpdateRefs(ctx, dn): + r = drsuapi.DsReplicaUpdateRefsRequest1() + r.naming_context = drsuapi.DsReplicaObjectIdentifier() + r.naming_context.dn = str(dn) + r.naming_context.guid = misc.GUID("00000000-0000-0000-0000-000000000000") + r.naming_context.sid = security.dom_sid("S-0-0") + r.dest_dsa_guid = ctx.ntds_guid + r.dest_dsa_dns_name = "%s._msdcs.%s" % (str(ctx.ntds_guid), ctx.dnsforest) + r.options = drsuapi.DRSUAPI_DRS_ADD_REF | drsuapi.DRSUAPI_DRS_DEL_REF + if not ctx.RODC: + r.options |= drsuapi.DRSUAPI_DRS_WRIT_REP + + if ctx.drsuapi is None: + ctx.drsuapi_connect() + + ctx.drsuapi.DsReplicaUpdateRefs(ctx.drsuapi_handle, 1, r) + + def join_add_dns_records(ctx): + """Remotely Add a DNS record to the target DC. We assume that if we + replicate DNS that the server holds the DNS roles and can accept + updates. + + This avoids issues getting replication going after the DC + first starts as the rest of the domain does not have to + wait for samba_dnsupdate to run successfully. + + Specifically, we add the records implied by the DsReplicaUpdateRefs + call above. + + We do not just run samba_dnsupdate as we want to strictly + operate against the DC we just joined: + - We do not want to query another DNS server + - We do not want to obtain a Kerberos ticket + (as the KDC we select may not be the DC we just joined, + and so may not be in sync with the password we just set) + - We do not wish to set the _ldap records until we have started + - We do not wish to use NTLM (the --use-samba-tool mode forces + NTLM) + + """ + + client_version = dnsserver.DNS_CLIENT_VERSION_LONGHORN + select_flags = dnsserver.DNS_RPC_VIEW_AUTHORITY_DATA |\ + dnsserver.DNS_RPC_VIEW_NO_CHILDREN + + zone = ctx.dnsdomain + msdcs_zone = "_msdcs.%s" % ctx.dnsforest + name = ctx.myname + msdcs_cname = str(ctx.ntds_guid) + cname_target = "%s.%s" % (name, zone) + IPs = samba.interface_ips(ctx.lp, ctx.force_all_ips) + + ctx.logger.info("Adding %d remote DNS records for %s.%s" % + (len(IPs), name, zone)) + + binding_options = "sign" + dns_conn = dnsserver.dnsserver("ncacn_ip_tcp:%s[%s]" % (ctx.server, binding_options), + ctx.lp, ctx.creds) + + name_found = True + + sd_helper = sd_utils.SDUtils(ctx.samdb) + + change_owner_sd = security.descriptor() + change_owner_sd.owner_sid = ctx.new_dc_account_sid + change_owner_sd.group_sid = security.dom_sid("%s-%d" % + (str(ctx.domsid), + security.DOMAIN_RID_DCS)) + + # TODO: Remove any old records from the primary DNS name + try: + (buflen, res) \ + = dns_conn.DnssrvEnumRecords2(client_version, + 0, + ctx.server, + zone, + name, + None, + dnsp.DNS_TYPE_ALL, + select_flags, + None, + None) + except WERRORError as e: + if e.args[0] == werror.WERR_DNS_ERROR_NAME_DOES_NOT_EXIST: + name_found = False + + if name_found: + for rec in res.rec: + for record in rec.records: + if record.wType == dnsp.DNS_TYPE_A or \ + record.wType == dnsp.DNS_TYPE_AAAA: + # delete record + del_rec_buf = dnsserver.DNS_RPC_RECORD_BUF() + del_rec_buf.rec = record + try: + dns_conn.DnssrvUpdateRecord2(client_version, + 0, + ctx.server, + zone, + name, + None, + del_rec_buf) + except WERRORError as e: + if e.args[0] == werror.WERR_DNS_ERROR_NAME_DOES_NOT_EXIST: + pass + else: + raise + + for IP in IPs: + if IP.find(':') != -1: + ctx.logger.info("Adding DNS AAAA record %s.%s for IPv6 IP: %s" + % (name, zone, IP)) + rec = 
AAAARecord(IP) + else: + ctx.logger.info("Adding DNS A record %s.%s for IPv4 IP: %s" + % (name, zone, IP)) + rec = ARecord(IP) + + # Add record + add_rec_buf = dnsserver.DNS_RPC_RECORD_BUF() + add_rec_buf.rec = rec + dns_conn.DnssrvUpdateRecord2(client_version, + 0, + ctx.server, + zone, + name, + add_rec_buf, + None) + + if (len(IPs) > 0): + domaindns_zone_dn = ldb.Dn(ctx.samdb, ctx.domaindns_zone) + (ctx.dns_a_dn, ldap_record) \ + = ctx.samdb.dns_lookup("%s.%s" % (name, zone), + dns_partition=domaindns_zone_dn) + + # Make the DC own the DNS record, not the administrator + sd_helper.modify_sd_on_dn(ctx.dns_a_dn, change_owner_sd, + controls=["sd_flags:1:%d" + % (security.SECINFO_OWNER + | security.SECINFO_GROUP)]) + + # Add record + ctx.logger.info("Adding DNS CNAME record %s.%s for %s" + % (msdcs_cname, msdcs_zone, cname_target)) + + add_rec_buf = dnsserver.DNS_RPC_RECORD_BUF() + rec = CNAMERecord(cname_target) + add_rec_buf.rec = rec + dns_conn.DnssrvUpdateRecord2(client_version, + 0, + ctx.server, + msdcs_zone, + msdcs_cname, + add_rec_buf, + None) + + forestdns_zone_dn = ldb.Dn(ctx.samdb, ctx.forestdns_zone) + (ctx.dns_cname_dn, ldap_record) \ + = ctx.samdb.dns_lookup("%s.%s" % (msdcs_cname, msdcs_zone), + dns_partition=forestdns_zone_dn) + + # Make the DC own the DNS record, not the administrator + sd_helper.modify_sd_on_dn(ctx.dns_cname_dn, change_owner_sd, + controls=["sd_flags:1:%d" + % (security.SECINFO_OWNER + | security.SECINFO_GROUP)]) + + ctx.logger.info("All other DNS records (like _ldap SRV records) " + + "will be created by samba_dnsupdate on first startup") + + def join_replicate_new_dns_records(ctx): + for nc in (ctx.domaindns_zone, ctx.forestdns_zone): + if nc in ctx.nc_list: + ctx.logger.info("Replicating new DNS records in %s" % (str(nc))) + ctx.repl.replicate(nc, ctx.source_dsa_invocation_id, + ctx.ntds_guid, rodc=ctx.RODC, + replica_flags=ctx.replica_flags, + full_sync=False) + + def join_finalise(ctx): + """Finalise the join, mark us synchronised and set up the secrets db.""" + + # FIXME we shouldn't do this in all cases + + # If for some reason we joined in a different site from that of the + # DC we just replicated from, then we don't need to send the + # DsReplicaUpdateRefs, as replication between sites is time based and + # at the initiative of the requesting DC + ctx.logger.info("Sending DsReplicaUpdateRefs for all the replicated partitions") + for nc in ctx.nc_list: + ctx.send_DsReplicaUpdateRefs(nc) + + if ctx.RODC: + print("Setting RODC invocationId") + ctx.local_samdb.set_invocation_id(str(ctx.invocation_id)) + ctx.local_samdb.set_opaque_integer("domainFunctionality", + ctx.behavior_version) + m = ldb.Message() + m.dn = ldb.Dn(ctx.local_samdb, "%s" % ctx.ntds_dn) + m["invocationId"] = ldb.MessageElement(ndr_pack(ctx.invocation_id), + ldb.FLAG_MOD_REPLACE, + "invocationId") + ctx.local_samdb.modify(m) + + # Note: as an RODC the invocationId is only stored + # on the RODC itself, the other DCs never see it. + # + # That is why we fix up the replPropertyMetaData stamp + # for the 'invocationId' attribute: we need to change + # the 'version' to '0', which is what Windows 2008R2 does as an RODC + # + # This means if the object on a RWDC ever gets an invocationId + # attribute, it will have version '1' (or higher), which will + # overwrite the RODC local value.
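# (Illustrative aside, not part of the upstream patch.) Replication
# conflicts on an attribute are resolved by comparing the
# replPropertyMetaData stamps, where a higher version wins, so pinning
# the local stamp at version 0 makes it the weakest possible value:
#
#     local (RODC) stamp:  version=0   -> always loses
#     remote (RWDC) stamp: version>=1  -> overwrites the RODC value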
+ ctx.local_samdb.set_attribute_replmetadata_version(m.dn, + "invocationId", + 0) + + ctx.logger.info("Setting isSynchronized and dsServiceName") + m = ldb.Message() + m.dn = ldb.Dn(ctx.local_samdb, '@ROOTDSE') + m["isSynchronized"] = ldb.MessageElement("TRUE", ldb.FLAG_MOD_REPLACE, "isSynchronized") + + guid = ctx.ntds_guid + m["dsServiceName"] = ldb.MessageElement("<GUID=%s>" % str(guid), + ldb.FLAG_MOD_REPLACE, "dsServiceName") + ctx.local_samdb.modify(m) + + if ctx.subdomain: + return + + secrets_ldb = Ldb(ctx.paths.secrets, session_info=system_session(), lp=ctx.lp) + + ctx.logger.info("Setting up secrets database") + secretsdb_self_join(secrets_ldb, domain=ctx.domain_name, + realm=ctx.realm, + dnsdomain=ctx.dnsdomain, + netbiosname=ctx.myname, + domainsid=ctx.domsid, + machinepass=ctx.acct_pass, + secure_channel_type=ctx.secure_channel_type, + key_version_number=ctx.key_version_number) + + if ctx.dns_backend.startswith("BIND9_"): + setup_bind9_dns(ctx.local_samdb, secrets_ldb, + ctx.names, ctx.paths, ctx.logger, + dns_backend=ctx.dns_backend, + dnspass=ctx.dnspass, os_level=ctx.behavior_version, + key_version_number=ctx.dns_key_version_number) + + def join_setup_trusts(ctx): + """Set up the trusts for the new subdomain.""" + + print("Setup domain trusts with server %s" % ctx.server) + binding_options = "" # why doesn't signing work here? w2k8r2 claims no session key + lsaconn = lsa.lsarpc("ncacn_np:%s[%s]" % (ctx.server, binding_options), + ctx.lp, ctx.creds) + + objectAttr = lsa.ObjectAttribute() + objectAttr.sec_qos = lsa.QosInfo() + + pol_handle = lsaconn.OpenPolicy2('', + objectAttr, security.SEC_FLAG_MAXIMUM_ALLOWED) + + info = lsa.TrustDomainInfoInfoEx() + info.domain_name.string = ctx.dnsdomain + info.netbios_name.string = ctx.domain_name + info.sid = ctx.domsid + info.trust_direction = lsa.LSA_TRUST_DIRECTION_INBOUND | lsa.LSA_TRUST_DIRECTION_OUTBOUND + info.trust_type = lsa.LSA_TRUST_TYPE_UPLEVEL + info.trust_attributes = lsa.LSA_TRUST_ATTRIBUTE_WITHIN_FOREST + + try: + oldname = lsa.String() + oldname.string = ctx.dnsdomain + oldinfo = lsaconn.QueryTrustedDomainInfoByName(pol_handle, oldname, + lsa.LSA_TRUSTED_DOMAIN_INFO_FULL_INFO) + print("Removing old trust record for %s (SID %s)" % (ctx.dnsdomain, oldinfo.info_ex.sid)) + lsaconn.DeleteTrustedDomain(pol_handle, oldinfo.info_ex.sid) + except RuntimeError: + pass + + password_blob = string_to_byte_array(ctx.trustdom_pass.encode('utf-16-le')) + + clear_value = drsblobs.AuthInfoClear() + clear_value.size = len(password_blob) + clear_value.password = password_blob + + clear_authentication_information = drsblobs.AuthenticationInformation() + clear_authentication_information.LastUpdateTime = samba.unix2nttime(int(time.time())) + clear_authentication_information.AuthType = lsa.TRUST_AUTH_TYPE_CLEAR + clear_authentication_information.AuthInfo = clear_value + + authentication_information_array = drsblobs.AuthenticationInformationArray() + authentication_information_array.count = 1 + authentication_information_array.array = [clear_authentication_information] + + outgoing = drsblobs.trustAuthInOutBlob() + outgoing.count = 1 + outgoing.current = authentication_information_array + + trustpass = drsblobs.trustDomainPasswords() + confounder = [3] * 512 + + for i in range(512): + confounder[i] = random.randint(0, 255) + + trustpass.confounder = confounder + + trustpass.outgoing = outgoing + trustpass.incoming = outgoing + + trustpass_blob = ndr_pack(trustpass) + + encrypted_trustpass = arcfour_encrypt(lsaconn.session_key, trustpass_blob) + +
auth_blob = lsa.DATA_BUF2() + auth_blob.size = len(encrypted_trustpass) + auth_blob.data = string_to_byte_array(encrypted_trustpass) + + auth_info = lsa.TrustDomainInfoAuthInfoInternal() + auth_info.auth_blob = auth_blob + + trustdom_handle = lsaconn.CreateTrustedDomainEx2(pol_handle, + info, + auth_info, + security.SEC_STD_DELETE) + + rec = { + "dn": "cn=%s,cn=system,%s" % (ctx.dnsforest, ctx.base_dn), + "objectclass": "trustedDomain", + "trustType": str(info.trust_type), + "trustAttributes": str(info.trust_attributes), + "trustDirection": str(info.trust_direction), + "flatname": ctx.forest_domain_name, + "trustPartner": ctx.dnsforest, + "trustAuthIncoming": ndr_pack(outgoing), + "trustAuthOutgoing": ndr_pack(outgoing), + "securityIdentifier": ndr_pack(ctx.forestsid) + } + ctx.local_samdb.add(rec) + + rec = { + "dn": "cn=%s$,cn=users,%s" % (ctx.forest_domain_name, ctx.base_dn), + "objectclass": "user", + "userAccountControl": str(samba.dsdb.UF_INTERDOMAIN_TRUST_ACCOUNT), + "clearTextPassword": ctx.trustdom_pass.encode('utf-16-le'), + "samAccountName": "%s$" % ctx.forest_domain_name + } + ctx.local_samdb.add(rec) + + def build_nc_lists(ctx): + # nc_list is the list of naming contexts (NCs) for which we will + # replicate and send an updateRef command to the partner DC + + # full_nc_list is the list of naming contexts (NCs) we hold + # read/write copies of. These lists are not subsets of each other. + ctx.nc_list = [ctx.config_dn, ctx.schema_dn] + ctx.full_nc_list = [ctx.base_dn, ctx.config_dn, ctx.schema_dn] + + if ctx.subdomain and ctx.dns_backend != "NONE": + ctx.full_nc_list += [ctx.domaindns_zone] + + elif not ctx.subdomain: + ctx.nc_list += [ctx.base_dn] + + if ctx.dns_backend != "NONE": + ctx.nc_list += [ctx.domaindns_zone] + ctx.nc_list += [ctx.forestdns_zone] + ctx.full_nc_list += [ctx.domaindns_zone] + ctx.full_nc_list += [ctx.forestdns_zone] + + def do_join(ctx): + ctx.build_nc_lists() + + if ctx.promote_existing: + ctx.promote_possible() + else: + ctx.cleanup_old_join() + + try: + ctx.join_add_objects() + ctx.join_provision() + ctx.join_replicate() + if ctx.subdomain: + ctx.join_add_objects2() + ctx.join_provision_own_domain() + ctx.join_setup_trusts() + + if ctx.dns_backend != "NONE": + ctx.join_add_dns_records() + ctx.join_replicate_new_dns_records() + + ctx.join_finalise() + except: + try: + print("Join failed - cleaning up") + except IOError: + pass + + # cleanup the failed join (checking we still have a live LDB + # connection to the remote DC first) + ctx.refresh_ldb_connection() + ctx.cleanup_old_join() + raise + + +def join_RODC(logger=None, server=None, creds=None, lp=None, site=None, netbios_name=None, + targetdir=None, domain=None, domain_critical_only=False, + machinepass=None, use_ntvfs=False, dns_backend=None, + promote_existing=False, plaintext_secrets=False, + backend_store=None, + backend_store_size=None): + """Join as an RODC.""" + + ctx = DCJoinContext(logger, server, creds, lp, site, netbios_name, + targetdir, domain, machinepass, use_ntvfs, dns_backend, + promote_existing, plaintext_secrets, + backend_store=backend_store, + backend_store_size=backend_store_size) + + lp.set("workgroup", ctx.domain_name) + logger.info("workgroup is %s" % ctx.domain_name) + + lp.set("realm", ctx.realm) + logger.info("realm is %s" % ctx.realm) + + ctx.krbtgt_dn = "CN=krbtgt_%s,CN=Users,%s" % (ctx.myname, ctx.base_dn) + + # setup some defaults for accounts that should be replicated to this RODC + ctx.never_reveal_sid = [ + "<SID=%s-%s>" % (ctx.domsid, security.DOMAIN_RID_RODC_DENY), + "<SID=%s>" %
+        "<SID=%s>" % security.SID_BUILTIN_SERVER_OPERATORS,
+        "<SID=%s>" % security.SID_BUILTIN_BACKUP_OPERATORS,
+        "<SID=%s>" % security.SID_BUILTIN_ACCOUNT_OPERATORS]
+    ctx.reveal_sid = "<SID=%s-%s>" % (ctx.domsid, security.DOMAIN_RID_RODC_ALLOW)
+
+    mysid = ctx.get_mysid()
+    admin_dn = "<SID=%s>" % mysid
+    ctx.managedby = admin_dn
+
+    ctx.userAccountControl = (samba.dsdb.UF_WORKSTATION_TRUST_ACCOUNT |
+                              samba.dsdb.UF_TRUSTED_TO_AUTHENTICATE_FOR_DELEGATION |
+                              samba.dsdb.UF_PARTIAL_SECRETS_ACCOUNT)
+
+    ctx.SPNs.extend(["RestrictedKrbHost/%s" % ctx.myname,
+                     "RestrictedKrbHost/%s" % ctx.dnshostname])
+
+    ctx.connection_dn = "CN=RODC Connection (FRS),%s" % ctx.ntds_dn
+    ctx.secure_channel_type = misc.SEC_CHAN_RODC
+    ctx.RODC = True
+    ctx.replica_flags |= (drsuapi.DRSUAPI_DRS_SPECIAL_SECRET_PROCESSING |
+                          drsuapi.DRSUAPI_DRS_GET_ALL_GROUP_MEMBERSHIP)
+    ctx.domain_replica_flags = ctx.replica_flags
+    if domain_critical_only:
+        ctx.domain_replica_flags |= drsuapi.DRSUAPI_DRS_CRITICAL_ONLY
+
+    ctx.do_join()
+
+    logger.info("Joined domain %s (SID %s) as an RODC" % (ctx.domain_name, ctx.domsid))
+
+
+def join_DC(logger=None, server=None, creds=None, lp=None, site=None, netbios_name=None,
+            targetdir=None, domain=None, domain_critical_only=False,
+            machinepass=None, use_ntvfs=False, dns_backend=None,
+            promote_existing=False, plaintext_secrets=False,
+            backend_store=None,
+            backend_store_size=None):
+    """Join as a DC."""
+    ctx = DCJoinContext(logger, server, creds, lp, site, netbios_name,
+                        targetdir, domain, machinepass, use_ntvfs, dns_backend,
+                        promote_existing, plaintext_secrets,
+                        backend_store=backend_store,
+                        backend_store_size=backend_store_size)
+
+    lp.set("workgroup", ctx.domain_name)
+    logger.info("workgroup is %s" % ctx.domain_name)
+
+    lp.set("realm", ctx.realm)
+    logger.info("realm is %s" % ctx.realm)
+
+    ctx.userAccountControl = samba.dsdb.UF_SERVER_TRUST_ACCOUNT | samba.dsdb.UF_TRUSTED_FOR_DELEGATION
+
+    ctx.SPNs.append('E3514235-4B06-11D1-AB04-00C04FC2DCD2/$NTDSGUID/%s' % ctx.dnsdomain)
+    ctx.secure_channel_type = misc.SEC_CHAN_BDC
+
+    ctx.replica_flags |= (drsuapi.DRSUAPI_DRS_WRIT_REP |
+                          drsuapi.DRSUAPI_DRS_FULL_SYNC_IN_PROGRESS)
+    ctx.domain_replica_flags = ctx.replica_flags
+    if domain_critical_only:
+        ctx.domain_replica_flags |= drsuapi.DRSUAPI_DRS_CRITICAL_ONLY
+
+    ctx.do_join()
+    logger.info("Joined domain %s (SID %s) as a DC" % (ctx.domain_name, ctx.domsid))
+
+
+def join_clone(logger=None, server=None, creds=None, lp=None,
+               targetdir=None, domain=None, include_secrets=False,
+               dns_backend="NONE", backend_store=None,
+               backend_store_size=None):
+    """Creates a local clone of a remote DC."""
+    ctx = DCCloneContext(logger, server, creds, lp, targetdir=targetdir,
+                         domain=domain, dns_backend=dns_backend,
+                         include_secrets=include_secrets,
+                         backend_store=backend_store,
+                         backend_store_size=backend_store_size)
+
+    lp.set("workgroup", ctx.domain_name)
+    logger.info("workgroup is %s" % ctx.domain_name)
+
+    lp.set("realm", ctx.realm)
+    logger.info("realm is %s" % ctx.realm)
+
+    ctx.do_join()
+    logger.info("Cloned domain %s (SID %s)" % (ctx.domain_name, ctx.domsid))
+    return ctx
+
+
+class DCCloneContext(DCJoinContext):
+    """Clones a remote DC."""
+
+    def __init__(ctx, logger=None, server=None, creds=None, lp=None,
+                 targetdir=None, domain=None, dns_backend=None,
+                 include_secrets=False, backend_store=None,
+                 backend_store_size=None):
+        super().__init__(logger, server, creds, lp,
+                         targetdir=targetdir, domain=domain,
+                         dns_backend=dns_backend,
+                         backend_store=backend_store,
+                         backend_store_size=backend_store_size)
+
+        # As we don't want to create or delete these DNs, we set them to None
+        ctx.server_dn = None
+        ctx.ntds_dn = None
+        ctx.acct_dn = None
+        ctx.myname = ctx.server.split('.')[0]
+        ctx.ntds_guid = None
+        ctx.rid_manager_dn = None
+
+        # Save this early
+        ctx.remote_dc_ntds_guid = ctx.samdb.get_ntds_GUID()
+
+        ctx.replica_flags |= (drsuapi.DRSUAPI_DRS_WRIT_REP |
+                              drsuapi.DRSUAPI_DRS_FULL_SYNC_IN_PROGRESS)
+        if not include_secrets:
+            ctx.replica_flags |= drsuapi.DRSUAPI_DRS_SPECIAL_SECRET_PROCESSING
+        ctx.domain_replica_flags = ctx.replica_flags
+
+    def join_finalise(ctx):
+        ctx.logger.info("Setting isSynchronized and dsServiceName")
+        m = ldb.Message()
+        m.dn = ldb.Dn(ctx.local_samdb, '@ROOTDSE')
+        m["isSynchronized"] = ldb.MessageElement("TRUE", ldb.FLAG_MOD_REPLACE,
+                                                 "isSynchronized")
+
+        # We want to appear to be the server we just cloned
+        guid = ctx.remote_dc_ntds_guid
+        m["dsServiceName"] = ldb.MessageElement("<GUID=%s>" % str(guid),
+                                                ldb.FLAG_MOD_REPLACE,
+                                                "dsServiceName")
+        ctx.local_samdb.modify(m)
+
+    def do_join(ctx):
+        ctx.build_nc_lists()
+
+        # When cloning a DC, we just want to provision a DC locally, then
+        # grab the remote DC's entire DB via DRS replication
+        ctx.join_provision()
+        ctx.join_replicate()
+        ctx.join_finalise()
+
+
+# Used to create a renamed backup of a DC. Renaming the domain means that the
+# cloned/backup DC can be started without interfering with the production DC.
+class DCCloneAndRenameContext(DCCloneContext):
+    """Clones a remote DC, renaming the domain along the way."""
+
+    def __init__(ctx, new_base_dn, new_domain_name, new_realm, logger=None,
+                 server=None, creds=None, lp=None, targetdir=None, domain=None,
+                 dns_backend=None, include_secrets=True, backend_store=None):
+        super().__init__(logger, server, creds, lp,
+                         targetdir=targetdir,
+                         domain=domain,
+                         dns_backend=dns_backend,
+                         include_secrets=include_secrets,
+                         backend_store=backend_store)
+        # store the new DN (etc) that we want the cloned DB to use
+        ctx.new_base_dn = new_base_dn
+        ctx.new_domain_name = new_domain_name
+        ctx.new_realm = new_realm
+
+    def create_replicator(ctx, repl_creds, binding_options):
+        """Creates a new DRS object for managing replications"""
+
+        # We want to rename all the domain objects, and the simplest way to do
+        # this is during replication. This is because the base DN of the top-
+        # level replicated object will flow through to all the objects below it
+        binding_str = "ncacn_ip_tcp:%s[%s]" % (ctx.server, binding_options)
+        return drs_utils.drs_ReplicateRenamer(binding_str, ctx.lp, repl_creds,
+                                              ctx.local_samdb,
+                                              ctx.invocation_id,
+                                              ctx.base_dn, ctx.new_base_dn)
+
+    def create_non_global_lp(ctx, global_lp):
+        """Creates a non-global LoadParm based on the global LP's settings"""
+
+        # the samba code shares a global LoadParm by default. Here we create a
+        # new LoadParm that retains the global settings, but any changes we
+        # make to it won't automatically affect the rest of the samba code.
+        # The easiest way to do this is to dump the global settings to a
+        # temporary smb.conf file, and then load the temp file into a new
+        # non-global LoadParm
+        fd, tmp_file = tempfile.mkstemp()
+        global_lp.dump(False, tmp_file)
+        local_lp = samba.param.LoadParm(filename_for_non_global_lp=tmp_file)
+        os.remove(tmp_file)
+        return local_lp
+
+    def rename_dn(ctx, dn_str):
+        """Uses string substitution to replace the base DN"""
+        old_base_dn = ctx.base_dn
+        return re.sub('%s$' % old_base_dn, ctx.new_base_dn, dn_str)
+
+    # we want to override the normal DCCloneContext's join_provision() so
+    # that we use the new domain DNs during the provision. We do this because:
+    # - it sets up smb.conf/secrets.ldb with the new realm/workgroup values
+    # - it sets up a default SAM DB that uses the new Schema DNs (without which
+    #   we couldn't apply the renamed DRS objects during replication)
+    def join_provision(ctx):
+        """Provision the local (renamed) SAM."""
+
+        print("Provisioning the new (renamed) domain...")
+
+        # the provision() calls make_smbconf() which uses lp.dump()/lp.load()
+        # to create a new smb.conf. By default, it uses the global LoadParm to
+        # do this, and so it would overwrite the realm/domain values globally.
+        # We still need the global LoadParm to retain the old domain's details,
+        # so we can connect to (and clone) the existing DC.
+        # So, copy the global settings into a non-global LoadParm, which we can
+        # then pass into provision(). This generates a new smb.conf correctly,
+        # without overwriting the global realm/domain values just yet.
+        non_global_lp = ctx.create_non_global_lp(ctx.lp)
+
+        # do the provision with the new/renamed domain DN values
+        presult = provision(ctx.logger, system_session(),
+                            targetdir=ctx.targetdir, samdb_fill=FILL_DRS,
+                            realm=ctx.new_realm, lp=non_global_lp,
+                            rootdn=ctx.rename_dn(ctx.root_dn), domaindn=ctx.new_base_dn,
+                            schemadn=ctx.rename_dn(ctx.schema_dn),
+                            configdn=ctx.rename_dn(ctx.config_dn),
+                            domain=ctx.new_domain_name, domainsid=ctx.domsid,
+                            serverrole="active directory domain controller",
+                            dns_backend=ctx.dns_backend,
+                            backend_store=ctx.backend_store)
+
+        print("Provision OK for renamed domain DN %s" % presult.domaindn)
+        ctx.local_samdb = presult.samdb
+        ctx.paths = presult.paths
diff --git a/python/samba/kcc/__init__.py b/python/samba/kcc/__init__.py
new file mode 100644
index 0000000..22590d0
--- /dev/null
+++ b/python/samba/kcc/__init__.py
@@ -0,0 +1,2754 @@
+# define the KCC object
+#
+# Copyright (C) Dave Craft 2011
+# Copyright (C) Andrew Bartlett 2015
+#
+# Andrew Bartlett's alleged work performed by his underlings Douglas
+# Bagnall and Garming Sam.
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program.  If not, see <http://www.gnu.org/licenses/>.
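+#
+# A rough sketch of how the KCC class below is typically driven (this
+# mirrors the samba_kcc tool; treat the exact arguments as an
+# illustration rather than a stable API):
+#
+#     import time
+#     from samba.kcc import KCC
+#
+#     kcc = KCC(int(time.time()), readonly=True)    # dry run: no DB writes
+#     kcc.run("ldap://dc1.example.com", lp, creds)  # compute topology once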
+ +import random +import uuid +from functools import cmp_to_key +import itertools +from samba import unix2nttime, nttime2unix +from samba import ldb, dsdb, drs_utils +from samba.auth import system_session +from samba.samdb import SamDB +from samba.dcerpc import drsuapi, misc + +from samba.kcc.kcc_utils import Site, Partition, Transport, SiteLink +from samba.kcc.kcc_utils import NCReplica, NCType, nctype_lut, GraphNode +from samba.kcc.kcc_utils import RepsFromTo, KCCError, KCCFailedObject +from samba.kcc.graph import convert_schedule_to_repltimes + +from samba.ndr import ndr_pack + +from samba.kcc.graph_utils import verify_and_dot + +from samba.kcc import ldif_import_export +from samba.kcc.graph import setup_graph, get_spanning_tree_edges +from samba.kcc.graph import Vertex + +from samba.kcc.debug import DEBUG, DEBUG_FN, logger +from samba.kcc import debug +from samba.common import cmp + + +def sort_dsa_by_gc_and_guid(dsa1, dsa2): + """Helper to sort DSAs by guid global catalog status + + GC DSAs come before non-GC DSAs, other than that, the guids are + sorted in NDR form. + + :param dsa1: A DSA object + :param dsa2: Another DSA + :return: -1, 0, or 1, indicating sort order. + """ + if dsa1.is_gc() and not dsa2.is_gc(): + return -1 + if not dsa1.is_gc() and dsa2.is_gc(): + return +1 + return cmp(ndr_pack(dsa1.dsa_guid), ndr_pack(dsa2.dsa_guid)) + + +def is_smtp_replication_available(): + """Can the KCC use SMTP replication? + + Currently always returns false because Samba doesn't implement + SMTP transfer for NC changes between DCs. + + :return: Boolean (always False) + """ + return False + + +class KCC(object): + """The Knowledge Consistency Checker class. + + A container for objects and methods allowing a run of the KCC. Produces a + set of connections in the samdb for which the Distributed Replication + Service can then utilize to replicate naming contexts + + :param unix_now: The putative current time in seconds since 1970. + :param readonly: Don't write to the database. + :param verify: Check topological invariants for the generated graphs + :param debug: Write verbosely to stderr. + :param dot_file_dir: write diagnostic Graphviz files in this directory + """ + def __init__(self, unix_now, readonly=False, verify=False, debug=False, + dot_file_dir=None): + """Initializes the partitions class which can hold + our local DCs partitions or all the partitions in + the forest + """ + self.part_table = {} # partition objects + self.site_table = {} + self.ip_transport = None + self.sitelink_table = {} + self.dsa_by_dnstr = {} + self.dsa_by_guid = {} + + self.get_dsa_by_guidstr = self.dsa_by_guid.get + self.get_dsa = self.dsa_by_dnstr.get + + # TODO: These should be backed by a 'permanent' store so that when + # calling DRSGetReplInfo with DS_REPL_INFO_KCC_DSA_CONNECT_FAILURES, + # the failure information can be returned + self.kcc_failed_links = {} + self.kcc_failed_connections = set() + + # Used in inter-site topology computation. 
A list + # of connections (by NTDSConnection object) that are + # to be kept when pruning un-needed NTDS Connections + self.kept_connections = set() + + self.my_dsa_dnstr = None # My dsa DN + self.my_dsa = None # My dsa object + + self.my_site_dnstr = None + self.my_site = None + + self.samdb = None + + self.unix_now = unix_now + self.nt_now = unix2nttime(unix_now) + self.readonly = readonly + self.verify = verify + self.debug = debug + self.dot_file_dir = dot_file_dir + + def load_ip_transport(self): + """Loads the inter-site transport objects for Sites + + :return: None + :raise KCCError: if no IP transport is found + """ + try: + res = self.samdb.search("CN=Inter-Site Transports,CN=Sites,%s" % + self.samdb.get_config_basedn(), + scope=ldb.SCOPE_SUBTREE, + expression="(objectClass=interSiteTransport)") + except ldb.LdbError as e2: + (enum, estr) = e2.args + raise KCCError("Unable to find inter-site transports - (%s)" % + estr) + + for msg in res: + dnstr = str(msg.dn) + + transport = Transport(dnstr) + + transport.load_transport(self.samdb) + if transport.name == 'IP': + self.ip_transport = transport + elif transport.name == 'SMTP': + logger.debug("Samba KCC is ignoring the obsolete " + "SMTP transport.") + + else: + logger.warning("Samba KCC does not support the transport " + "called %r." % (transport.name,)) + + if self.ip_transport is None: + raise KCCError("there doesn't seem to be an IP transport") + + def load_all_sitelinks(self): + """Loads the inter-site siteLink objects + + :return: None + :raise KCCError: if site-links aren't found + """ + try: + res = self.samdb.search("CN=Inter-Site Transports,CN=Sites,%s" % + self.samdb.get_config_basedn(), + scope=ldb.SCOPE_SUBTREE, + expression="(objectClass=siteLink)") + except ldb.LdbError as e3: + (enum, estr) = e3.args + raise KCCError("Unable to find inter-site siteLinks - (%s)" % estr) + + for msg in res: + dnstr = str(msg.dn) + + # already loaded + if dnstr in self.sitelink_table: + continue + + sitelink = SiteLink(dnstr) + + sitelink.load_sitelink(self.samdb) + + # Assign this siteLink to table + # and index by dn + self.sitelink_table[dnstr] = sitelink + + def load_site(self, dn_str): + """Helper for load_my_site and load_all_sites. + + Put all the site's DSAs into the KCC indices. + + :param dn_str: a site dn_str + :return: the Site object pertaining to the dn_str + """ + site = Site(dn_str, self.unix_now) + site.load_site(self.samdb) + + # We avoid replacing the site with an identical copy in case + # somewhere else has a reference to the old one, which would + # lead to all manner of confusion and chaos. + guid = str(site.site_guid) + if guid not in self.site_table: + self.site_table[guid] = site + self.dsa_by_dnstr.update(site.dsa_table) + self.dsa_by_guid.update((str(x.dsa_guid), x) + for x in site.dsa_table.values()) + + return self.site_table[guid] + + def load_my_site(self): + """Load the Site object for the local DSA. + + :return: None + """ + self.my_site_dnstr = ("CN=%s,CN=Sites,%s" % ( + self.samdb.server_site_name(), + self.samdb.get_config_basedn())) + + self.my_site = self.load_site(self.my_site_dnstr) + + def load_all_sites(self): + """Discover all sites and create Site objects. 
+
+        :return: None
+        :raise: KCCError if sites can't be found
+        """
+        try:
+            res = self.samdb.search("CN=Sites,%s" %
+                                    self.samdb.get_config_basedn(),
+                                    scope=ldb.SCOPE_SUBTREE,
+                                    expression="(objectClass=site)")
+        except ldb.LdbError as e4:
+            (enum, estr) = e4.args
+            raise KCCError("Unable to find sites - (%s)" % estr)
+
+        for msg in res:
+            sitestr = str(msg.dn)
+            self.load_site(sitestr)
+
+    def load_my_dsa(self):
+        """Discover my nTDSDSA dn thru the rootDSE entry
+
+        :return: None
+        :raise: KCCError if DSA can't be found
+        """
+        dn_query = "<GUID=%s>" % self.samdb.get_ntds_GUID()
+        dn = ldb.Dn(self.samdb, dn_query)
+        try:
+            res = self.samdb.search(base=dn, scope=ldb.SCOPE_BASE,
+                                    attrs=["objectGUID"])
+        except ldb.LdbError as e5:
+            (enum, estr) = e5.args
+            DEBUG_FN("Search for dn '%s' [from %s] failed: %s. "
+                     "This typically happens in --importldif mode due "
+                     "to lack of module support." % (dn, dn_query, estr))
+            try:
+                # We work around the failure above by looking at the
+                # dsServiceName that was put in the fake rootdse by
+                # the --exportldif, rather than the
+                # samdb.get_ntds_GUID(). The disadvantage is that this
+                # mode requires we modify the @ROOTDSE dn to support
+                # --forced-local-dsa
+                service_name_res = self.samdb.search(base="",
+                                                     scope=ldb.SCOPE_BASE,
+                                                     attrs=["dsServiceName"])
+                dn = ldb.Dn(self.samdb,
+                            service_name_res[0]["dsServiceName"][0].decode('utf8'))
+
+                res = self.samdb.search(base=dn, scope=ldb.SCOPE_BASE,
+                                        attrs=["objectGUID"])
+            except ldb.LdbError as e:
+                (enum, estr) = e.args
+                raise KCCError("Unable to find my nTDSDSA - (%s)" % estr)
+
+        if len(res) != 1:
+            raise KCCError("Unable to find my nTDSDSA at %s" %
+                           dn.extended_str())
+
+        ntds_guid = misc.GUID(self.samdb.get_ntds_GUID())
+        if misc.GUID(res[0]["objectGUID"][0]) != ntds_guid:
+            raise KCCError("Did not find the GUID we expected,"
+                           " perhaps due to --importldif")
+
+        self.my_dsa_dnstr = str(res[0].dn)
+
+        self.my_dsa = self.my_site.get_dsa(self.my_dsa_dnstr)
+
+        if self.my_dsa_dnstr not in self.dsa_by_dnstr:
+            debug.DEBUG_DARK_YELLOW("my_dsa %s isn't in self.dsa_by_dnstr:"
+                                    " it must be an RODC.\n"
+                                    "Let's add it, because my_dsa is special!"
+                                    "\n(likewise for self.dsa_by_guid)" %
+                                    self.my_dsa_dnstr)
+
+            self.dsa_by_dnstr[self.my_dsa_dnstr] = self.my_dsa
+            self.dsa_by_guid[str(self.my_dsa.dsa_guid)] = self.my_dsa
+
+    def load_all_partitions(self):
+        """Discover and load all partitions.
+ + Each NC is inserted into the part_table by partition + dn string (not the nCName dn string) + + :return: None + :raise: KCCError if partitions can't be found + """ + try: + res = self.samdb.search("CN=Partitions,%s" % + self.samdb.get_config_basedn(), + scope=ldb.SCOPE_SUBTREE, + expression="(objectClass=crossRef)") + except ldb.LdbError as e6: + (enum, estr) = e6.args + raise KCCError("Unable to find partitions - (%s)" % estr) + + for msg in res: + partstr = str(msg.dn) + + # already loaded + if partstr in self.part_table: + continue + + part = Partition(partstr) + + part.load_partition(self.samdb) + self.part_table[partstr] = part + + def refresh_failed_links_connections(self, ping=None): + """Ensure the failed links list is up to date + + Based on MS-ADTS 6.2.2.1 + + :param ping: An oracle function of remote site availability + :return: None + """ + # LINKS: Refresh failed links + self.kcc_failed_links = {} + current, needed = self.my_dsa.get_rep_tables() + for replica in current.values(): + # For every possible connection to replicate + for reps_from in replica.rep_repsFrom: + failure_count = reps_from.consecutive_sync_failures + if failure_count <= 0: + continue + + dsa_guid = str(reps_from.source_dsa_obj_guid) + time_first_failure = reps_from.last_success + last_result = reps_from.last_attempt + dns_name = reps_from.dns_name1 + + f = self.kcc_failed_links.get(dsa_guid) + if f is None: + f = KCCFailedObject(dsa_guid, failure_count, + time_first_failure, last_result, + dns_name) + self.kcc_failed_links[dsa_guid] = f + else: + f.failure_count = max(f.failure_count, failure_count) + f.time_first_failure = min(f.time_first_failure, + time_first_failure) + f.last_result = last_result + + # CONNECTIONS: Refresh failed connections + restore_connections = set() + if ping is not None: + DEBUG("refresh_failed_links: checking if links are still down") + for connection in self.kcc_failed_connections: + if ping(connection.dns_name): + # Failed connection is no longer failing + restore_connections.add(connection) + else: + connection.failure_count += 1 + else: + DEBUG("refresh_failed_links: not checking live links because we\n" + "weren't asked to --attempt-live-connections") + + # Remove the restored connections from the failed connections + self.kcc_failed_connections.difference_update(restore_connections) + + def is_stale_link_connection(self, target_dsa): + """Check whether a link to a remote DSA is stale + + Used in MS-ADTS 6.2.2.2 Intrasite Connection Creation + + Returns True if the remote seems to have been down for at + least two hours, otherwise False. + + :param target_dsa: the remote DSA object + :return: True if link is stale, otherwise False + """ + failed_link = self.kcc_failed_links.get(str(target_dsa.dsa_guid)) + if failed_link: + # failure_count should be > 0, but check anyways + if failed_link.failure_count > 0: + unix_first_failure = \ + nttime2unix(failed_link.time_first_failure) + # TODO guard against future + if unix_first_failure > self.unix_now: + logger.error("The last success time attribute for " + "repsFrom is in the future!") + + # Perform calculation in seconds + if (self.unix_now - unix_first_failure) > 60 * 60 * 2: + return True + + # TODO connections. 
+ # We have checked failed *links*, but we also need to check + # *connections* + + return False + + # TODO: This should be backed by some form of local database + def remove_unneeded_failed_links_connections(self): + # Remove all tuples in kcc_failed_links where failure count = 0 + # In this implementation, this should never happen. + + # Remove all connections which were not used this run or connections + # that became active during this run. + pass + + def _ensure_connections_are_loaded(self, connections): + """Load or fake-load NTDSConnections lacking GUIDs + + New connections don't have GUIDs and created times which are + needed for sorting. If we're in read-only mode, we make fake + GUIDs, otherwise we ask SamDB to do it for us. + + :param connections: an iterable of NTDSConnection objects. + :return: None + """ + for cn_conn in connections: + if cn_conn.guid is None: + if self.readonly: + cn_conn.guid = misc.GUID(str(uuid.uuid4())) + cn_conn.whenCreated = self.nt_now + else: + cn_conn.load_connection(self.samdb) + + def _mark_broken_ntdsconn(self): + """Find NTDS Connections that lack a remote + + I'm not sure how they appear. Let's be rid of them by marking + them with the to_be_deleted attribute. + + :return: None + """ + for cn_conn in self.my_dsa.connect_table.values(): + s_dnstr = cn_conn.get_from_dnstr() + if s_dnstr is None: + DEBUG_FN("%s has phantom connection %s" % (self.my_dsa, + cn_conn)) + cn_conn.to_be_deleted = True + + def _mark_unneeded_local_ntdsconn(self): + """Find unneeded intrasite NTDS Connections for removal + + Based on MS-ADTS 6.2.2.4 Removing Unnecessary Connections. + Every DC removes its own unnecessary intrasite connections. + This function tags them with the to_be_deleted attribute. + + :return: None + """ + # XXX should an RODC be regarded as same site? It isn't part + # of the intrasite ring. + + if self.my_site.is_cleanup_ntdsconn_disabled(): + DEBUG_FN("not doing ntdsconn cleanup for site %s, " + "because it is disabled" % self.my_site) + return + + mydsa = self.my_dsa + + try: + self._ensure_connections_are_loaded(mydsa.connect_table.values()) + except KCCError: + # RODC never actually added any connections to begin with + if mydsa.is_ro(): + return + + local_connections = [] + + for cn_conn in mydsa.connect_table.values(): + s_dnstr = cn_conn.get_from_dnstr() + if s_dnstr in self.my_site.dsa_table: + removable = not (cn_conn.is_generated() or + cn_conn.is_rodc_topology()) + packed_guid = ndr_pack(cn_conn.guid) + local_connections.append((cn_conn, s_dnstr, + packed_guid, removable)) + + # Avoid "ValueError: r cannot be bigger than the iterable" in + # for a, b in itertools.permutations(local_connections, 2): + if (len(local_connections) < 2): + return + + for a, b in itertools.permutations(local_connections, 2): + cn_conn, s_dnstr, packed_guid, removable = a + cn_conn2, s_dnstr2, packed_guid2, removable2 = b + if (removable and + s_dnstr == s_dnstr2 and + cn_conn.whenCreated < cn_conn2.whenCreated or + (cn_conn.whenCreated == cn_conn2.whenCreated and + packed_guid < packed_guid2)): + cn_conn.to_be_deleted = True + + def _mark_unneeded_intersite_ntdsconn(self): + """find unneeded intersite NTDS Connections for removal + + Based on MS-ADTS 6.2.2.4 Removing Unnecessary Connections. The + intersite topology generator removes links for all DCs in its + site. Here we just tag them with the to_be_deleted attribute. + + :return: None + """ + # TODO Figure out how best to handle the RODC case + # The RODC is ISTG, but shouldn't act on anyone's behalf. 
+ if self.my_dsa.is_ro(): + return + + # Find the intersite connections + local_dsas = self.my_site.dsa_table + connections_and_dsas = [] + for dsa in local_dsas.values(): + for cn in dsa.connect_table.values(): + if cn.to_be_deleted: + continue + s_dnstr = cn.get_from_dnstr() + if s_dnstr is None: + continue + if s_dnstr not in local_dsas: + from_dsa = self.get_dsa(s_dnstr) + # Samba ONLY: ISTG removes connections to dead DCs + if from_dsa is None or '\\0ADEL' in s_dnstr: + logger.info("DSA appears deleted, removing connection %s" + % s_dnstr) + cn.to_be_deleted = True + continue + connections_and_dsas.append((cn, dsa, from_dsa)) + + self._ensure_connections_are_loaded(x[0] for x in connections_and_dsas) + for cn, to_dsa, from_dsa in connections_and_dsas: + if not cn.is_generated() or cn.is_rodc_topology(): + continue + + # If the connection is in the kept_connections list, we + # only remove it if an endpoint seems down. + if (cn in self.kept_connections and + not (self.is_bridgehead_failed(to_dsa, True) or + self.is_bridgehead_failed(from_dsa, True))): + continue + + # this one is broken and might be superseded by another. + # But which other? Let's just say another link to the same + # site can supersede. + from_dnstr = from_dsa.dsa_dnstr + for site in self.site_table.values(): + if from_dnstr in site.rw_dsa_table: + for cn2, to_dsa2, from_dsa2 in connections_and_dsas: + if (cn is not cn2 and + from_dsa2 in site.rw_dsa_table): + cn.to_be_deleted = True + + def _commit_changes(self, dsa): + if dsa.is_ro() or self.readonly: + for connect in dsa.connect_table.values(): + if connect.to_be_deleted: + logger.info("TO BE DELETED:\n%s" % connect) + if connect.to_be_added: + logger.info("TO BE ADDED:\n%s" % connect) + if connect.to_be_modified: + logger.info("TO BE MODIFIED:\n%s" % connect) + + # Perform deletion from our tables but perform + # no database modification + dsa.commit_connections(self.samdb, ro=True) + else: + # Commit any modified connections + dsa.commit_connections(self.samdb) + + def remove_unneeded_ntdsconn(self, all_connected): + """Remove unneeded NTDS Connections once topology is calculated + + Based on MS-ADTS 6.2.2.4 Removing Unnecessary Connections + + :param all_connected: indicates whether all sites are connected + :return: None + """ + self._mark_broken_ntdsconn() + self._mark_unneeded_local_ntdsconn() + # if we are not the istg, we're done! + # if we are the istg, but all_connected is False, we also do nothing. + if self.my_dsa.is_istg() and all_connected: + self._mark_unneeded_intersite_ntdsconn() + + for dsa in self.my_site.dsa_table.values(): + self._commit_changes(dsa) + + def modify_repsFrom(self, n_rep, t_repsFrom, s_rep, s_dsa, cn_conn): + """Update an repsFrom object if required. + + Part of MS-ADTS 6.2.2.5. + + Update t_repsFrom if necessary to satisfy requirements. Such + updates are typically required when the IDL_DRSGetNCChanges + server has moved from one site to another--for example, to + enable compression when the server is moved from the + client's site to another site. + + The repsFrom.update_flags bit field may be modified + auto-magically if any changes are made here. See + kcc_utils.RepsFromTo for gory details. 
+ + + :param n_rep: NC replica we need + :param t_repsFrom: repsFrom tuple to modify + :param s_rep: NC replica at source DSA + :param s_dsa: source DSA + :param cn_conn: Local DSA NTDSConnection child + + :return: None + """ + s_dnstr = s_dsa.dsa_dnstr + same_site = s_dnstr in self.my_site.dsa_table + + # if schedule doesn't match then update and modify + times = convert_schedule_to_repltimes(cn_conn.schedule) + if times != t_repsFrom.schedule: + t_repsFrom.schedule = times + + # Bit DRS_ADD_REF is set in replicaFlags unconditionally + # Samba ONLY: + if ((t_repsFrom.replica_flags & + drsuapi.DRSUAPI_DRS_ADD_REF) == 0x0): + t_repsFrom.replica_flags |= drsuapi.DRSUAPI_DRS_ADD_REF + + # Bit DRS_PER_SYNC is set in replicaFlags if and only + # if nTDSConnection schedule has a value v that specifies + # scheduled replication is to be performed at least once + # per week. + if cn_conn.is_schedule_minimum_once_per_week(): + + if ((t_repsFrom.replica_flags & + drsuapi.DRSUAPI_DRS_PER_SYNC) == 0x0): + t_repsFrom.replica_flags |= drsuapi.DRSUAPI_DRS_PER_SYNC + + # Bit DRS_INIT_SYNC is set in t.replicaFlags if and only + # if the source DSA and the local DC's nTDSDSA object are + # in the same site or source dsa is the FSMO role owner + # of one or more FSMO roles in the NC replica. + if same_site or n_rep.is_fsmo_role_owner(s_dnstr): + + if ((t_repsFrom.replica_flags & + drsuapi.DRSUAPI_DRS_INIT_SYNC) == 0x0): + t_repsFrom.replica_flags |= drsuapi.DRSUAPI_DRS_INIT_SYNC + + # If bit NTDSCONN_OPT_OVERRIDE_NOTIFY_DEFAULT is set in + # cn!options, bit DRS_NEVER_NOTIFY is set in t.replicaFlags + # if and only if bit NTDSCONN_OPT_USE_NOTIFY is clear in + # cn!options. Otherwise, bit DRS_NEVER_NOTIFY is set in + # t.replicaFlags if and only if s and the local DC's + # nTDSDSA object are in different sites. + if ((cn_conn.options & + dsdb.NTDSCONN_OPT_OVERRIDE_NOTIFY_DEFAULT) != 0x0): + + if (cn_conn.options & dsdb.NTDSCONN_OPT_USE_NOTIFY) == 0x0: + # WARNING + # + # it LOOKS as if this next test is a bit silly: it + # checks the flag then sets it if it not set; the same + # effect could be achieved by unconditionally setting + # it. But in fact the repsFrom object has special + # magic attached to it, and altering replica_flags has + # side-effects. That is bad in my opinion, but there + # you go. + if ((t_repsFrom.replica_flags & + drsuapi.DRSUAPI_DRS_NEVER_NOTIFY) == 0x0): + t_repsFrom.replica_flags |= \ + drsuapi.DRSUAPI_DRS_NEVER_NOTIFY + + elif not same_site: + + if ((t_repsFrom.replica_flags & + drsuapi.DRSUAPI_DRS_NEVER_NOTIFY) == 0x0): + t_repsFrom.replica_flags |= drsuapi.DRSUAPI_DRS_NEVER_NOTIFY + + # Bit DRS_USE_COMPRESSION is set in t.replicaFlags if + # and only if s and the local DC's nTDSDSA object are + # not in the same site and the + # NTDSCONN_OPT_DISABLE_INTERSITE_COMPRESSION bit is + # clear in cn!options + if (not same_site and + (cn_conn.options & + dsdb.NTDSCONN_OPT_DISABLE_INTERSITE_COMPRESSION) == 0x0): + + if ((t_repsFrom.replica_flags & + drsuapi.DRSUAPI_DRS_USE_COMPRESSION) == 0x0): + t_repsFrom.replica_flags |= drsuapi.DRSUAPI_DRS_USE_COMPRESSION + + # Bit DRS_TWOWAY_SYNC is set in t.replicaFlags if and only + # if bit NTDSCONN_OPT_TWOWAY_SYNC is set in cn!options. 
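+        # (As with the flags above, the test below only assigns when the
+        # bit is actually missing.  A minimal sketch of the idiom, with a
+        # hypothetical helper name, since every assignment to
+        # t_repsFrom.replica_flags trips RepsFromTo's update_flags
+        # tracking:
+        #
+        #     def set_flag_if_clear(t_repsFrom, flag):
+        #         if (t_repsFrom.replica_flags & flag) == 0:
+        #             t_repsFrom.replica_flags |= flag
+        # )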
+ if (cn_conn.options & dsdb.NTDSCONN_OPT_TWOWAY_SYNC) != 0x0: + + if ((t_repsFrom.replica_flags & + drsuapi.DRSUAPI_DRS_TWOWAY_SYNC) == 0x0): + t_repsFrom.replica_flags |= drsuapi.DRSUAPI_DRS_TWOWAY_SYNC + + # Bits DRS_DISABLE_AUTO_SYNC and DRS_DISABLE_PERIODIC_SYNC are + # set in t.replicaFlags if and only if cn!enabledConnection = false. + if not cn_conn.is_enabled(): + + if ((t_repsFrom.replica_flags & + drsuapi.DRSUAPI_DRS_DISABLE_AUTO_SYNC) == 0x0): + t_repsFrom.replica_flags |= \ + drsuapi.DRSUAPI_DRS_DISABLE_AUTO_SYNC + + if ((t_repsFrom.replica_flags & + drsuapi.DRSUAPI_DRS_DISABLE_PERIODIC_SYNC) == 0x0): + t_repsFrom.replica_flags |= \ + drsuapi.DRSUAPI_DRS_DISABLE_PERIODIC_SYNC + + # If s and the local DC's nTDSDSA object are in the same site, + # cn!transportType has no value, or the RDN of cn!transportType + # is CN=IP: + # + # Bit DRS_MAIL_REP in t.replicaFlags is clear. + # + # t.uuidTransport = NULL GUID. + # + # t.uuidDsa = The GUID-based DNS name of s. + # + # Otherwise: + # + # Bit DRS_MAIL_REP in t.replicaFlags is set. + # + # If x is the object with dsname cn!transportType, + # t.uuidTransport = x!objectGUID. + # + # Let a be the attribute identified by + # x!transportAddressAttribute. If a is + # the dNSHostName attribute, t.uuidDsa = the GUID-based + # DNS name of s. Otherwise, t.uuidDsa = (s!parent)!a. + # + # It appears that the first statement i.e. + # + # "If s and the local DC's nTDSDSA object are in the same + # site, cn!transportType has no value, or the RDN of + # cn!transportType is CN=IP:" + # + # could be a slightly tighter statement if it had an "or" + # between each condition. I believe this should + # be interpreted as: + # + # IF (same-site) OR (no-value) OR (type-ip) + # + # because IP should be the primary transport mechanism + # (even in inter-site) and the absence of the transportType + # attribute should always imply IP no matter if its multi-site + # + # NOTE MS-TECH INCORRECT: + # + # All indications point to these statements above being + # incorrectly stated: + # + # t.uuidDsa = The GUID-based DNS name of s. + # + # Let a be the attribute identified by + # x!transportAddressAttribute. If a is + # the dNSHostName attribute, t.uuidDsa = the GUID-based + # DNS name of s. Otherwise, t.uuidDsa = (s!parent)!a. + # + # because the uuidDSA is a GUID and not a GUID-base DNS + # name. Nor can uuidDsa hold (s!parent)!a if not + # dNSHostName. What should have been said is: + # + # t.naDsa = The GUID-based DNS name of s + # + # That would also be correct if transportAddressAttribute + # were "mailAddress" because (naDsa) can also correctly + # hold the SMTP ISM service address. + # + nastr = "%s._msdcs.%s" % (s_dsa.dsa_guid, self.samdb.forest_dns_name()) + + if ((t_repsFrom.replica_flags & + drsuapi.DRSUAPI_DRS_MAIL_REP) != 0x0): + t_repsFrom.replica_flags &= ~drsuapi.DRSUAPI_DRS_MAIL_REP + + t_repsFrom.transport_guid = misc.GUID() + + # See (NOTE MS-TECH INCORRECT) above + + # NOTE: it looks like these conditionals are pointless, + # because the state will end up as `t_repsFrom.dns_name1 == + # nastr` in either case, BUT the repsFrom thing is magic and + # assigning to it alters some flags. So we try not to update + # it unless necessary. 
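+        # For illustration (hypothetical GUID and forest name), nastr is
+        # the GUID-based DNS name that the source DSA registers in DNS:
+        #
+        #     4a6bd92a-6612-4ea9-92ec-80ab085e4f44._msdcs.example.com
+        #
+        # The guarded assignments below leave dns_name1/dns_name2 alone
+        # when they already hold that name, so the repsFrom tuple is not
+        # needlessly marked as modified.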
+        if t_repsFrom.dns_name1 != nastr:
+            t_repsFrom.dns_name1 = nastr
+
+        if t_repsFrom.version > 0x1 and t_repsFrom.dns_name2 != nastr:
+            t_repsFrom.dns_name2 = nastr
+
+        if t_repsFrom.is_modified():
+            DEBUG_FN("modify_repsFrom(): %s" % t_repsFrom)
+
+    def get_dsa_for_implied_replica(self, n_rep, cn_conn):
+        """If a connection implies a replica, find the relevant DSA
+
+        Given an NC replica and NTDS Connection, determine if the
+        connection implies a repsFrom tuple should be present from the
+        source DSA listed in the connection to the naming context. If
+        it should be, return the DSA; otherwise return None.
+
+        Based on part of MS-ADTS 6.2.2.5
+
+        :param n_rep: NC replica
+        :param cn_conn: NTDS Connection
+        :return: source DSA or None
+        """
+        # XXX different conditions for "implies" than MS-ADTS 6.2.2
+        # preamble.
+
+        # It boils down to: we want an enabled, non-FRS connection to
+        # a valid remote DSA with a non-RO replica corresponding to
+        # n_rep.
+
+        if not cn_conn.is_enabled() or cn_conn.is_rodc_topology():
+            return None
+
+        s_dnstr = cn_conn.get_from_dnstr()
+        s_dsa = self.get_dsa(s_dnstr)
+
+        # No DSA matching this source DN string?
+        if s_dsa is None:
+            return None
+
+        s_rep = s_dsa.get_current_replica(n_rep.nc_dnstr)
+
+        if (s_rep is not None and
+            s_rep.is_present() and
+            (not s_rep.is_ro() or n_rep.is_partial())):
+            return s_dsa
+        return None
+
+    def translate_ntdsconn(self, current_dsa=None):
+        """Adjust repsFrom to match NTDSConnections
+
+        This function adjusts values of repsFrom abstract attributes of NC
+        replicas on the local DC to match those implied by
+        nTDSConnection objects.
+
+        Based on [MS-ADTS] 6.2.2.5
+
+        :param current_dsa: optional DSA on whose behalf we are acting.
+        :return: None
+        """
+        ro = False
+        if current_dsa is None:
+            current_dsa = self.my_dsa
+
+        if current_dsa.is_ro():
+            ro = True
+
+        if current_dsa.is_translate_ntdsconn_disabled():
+            DEBUG_FN("skipping translate_ntdsconn() "
+                     "because disabling flag is set")
+            return
+
+        DEBUG_FN("translate_ntdsconn(): enter")
+
+        current_rep_table, needed_rep_table = current_dsa.get_rep_tables()
+
+        # Filled in with replicas we currently have that need deleting
+        delete_reps = set()
+
+        # We're using the MS notation names here to allow
+        # correlation back to the published algorithm.
+        #
+        # n_rep - NC replica (n)
+        # t_repsFrom - tuple (t) in n!repsFrom
+        # s_dsa - Source DSA of the replica. Defined as nTDSDSA
+        #         object (s) such that (s!objectGUID = t.uuidDsa)
+        #         In our IDL representation of repsFrom the (uuidDsa)
+        #         attribute is called (source_dsa_obj_guid)
+        # cn_conn - (cn) is nTDSConnection object and child of the local
+        #           DC's nTDSDSA object and (cn!fromServer = s)
+        # s_rep - source DSA replica of n
+        #
+        # If we have the replica and it's not needed
+        # then we add it to the "to be deleted" list.
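+        # Condensed sketch of what follows (pseudocode only):
+        #
+        #     for each current replica: delete it if no longer needed
+        #     for each needed replica n_rep:
+        #         for each t_repsFrom in n_rep: prune stale tuples or update
+        #         for each cn_conn: add the repsFrom the connection implies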
+ for dnstr in current_rep_table: + # If we're on the RODC, hardcode the update flags + if ro: + c_rep = current_rep_table[dnstr] + c_rep.load_repsFrom(self.samdb) + for t_repsFrom in c_rep.rep_repsFrom: + replica_flags = (drsuapi.DRSUAPI_DRS_INIT_SYNC | + drsuapi.DRSUAPI_DRS_PER_SYNC | + drsuapi.DRSUAPI_DRS_ADD_REF | + drsuapi.DRSUAPI_DRS_SPECIAL_SECRET_PROCESSING | + drsuapi.DRSUAPI_DRS_NONGC_RO_REP) + if t_repsFrom.replica_flags != replica_flags: + t_repsFrom.replica_flags = replica_flags + c_rep.commit_repsFrom(self.samdb, ro=self.readonly) + else: + if dnstr not in needed_rep_table: + delete_reps.add(dnstr) + + DEBUG_FN('current %d needed %d delete %d' % (len(current_rep_table), + len(needed_rep_table), len(delete_reps))) + + if delete_reps: + # TODO Must delete repsFrom/repsTo for these replicas + DEBUG('deleting these reps: %s' % delete_reps) + for dnstr in delete_reps: + del current_rep_table[dnstr] + + # HANDLE REPS-FROM + # + # Now perform the scan of replicas we'll need + # and compare any current repsFrom against the + # connections + for n_rep in needed_rep_table.values(): + + # load any repsFrom and fsmo roles as we'll + # need them during connection translation + n_rep.load_repsFrom(self.samdb) + n_rep.load_fsmo_roles(self.samdb) + + # Loop thru the existing repsFrom tuples (if any) + # XXX This is a list and could contain duplicates + # (multiple load_repsFrom calls) + for t_repsFrom in n_rep.rep_repsFrom: + + # for each tuple t in n!repsFrom, let s be the nTDSDSA + # object such that s!objectGUID = t.uuidDsa + guidstr = str(t_repsFrom.source_dsa_obj_guid) + s_dsa = self.get_dsa_by_guidstr(guidstr) + + # Source dsa is gone from config (strange) + # so cleanup stale repsFrom for unlisted DSA + if s_dsa is None: + logger.warning("repsFrom source DSA guid (%s) not found" % + guidstr) + t_repsFrom.to_be_deleted = True + continue + + # Find the connection that this repsFrom would use. If + # there isn't a good one (i.e. non-RODC_TOPOLOGY, + # meaning non-FRS), we delete the repsFrom. + s_dnstr = s_dsa.dsa_dnstr + connections = current_dsa.get_connection_by_from_dnstr(s_dnstr) + for cn_conn in connections: + if not cn_conn.is_rodc_topology(): + break + else: + # no break means no non-rodc_topology connection exists + t_repsFrom.to_be_deleted = True + continue + + # KCC removes this repsFrom tuple if any of the following + # is true: + # No NC replica of the NC "is present" on DSA that + # would be source of replica + # + # A writable replica of the NC "should be present" on + # the local DC, but a partial replica "is present" on + # the source DSA + s_rep = s_dsa.get_current_replica(n_rep.nc_dnstr) + + if s_rep is None or not s_rep.is_present() or \ + (not n_rep.is_ro() and s_rep.is_partial()): + + t_repsFrom.to_be_deleted = True + continue + + # If the KCC did not remove t from n!repsFrom, it updates t + self.modify_repsFrom(n_rep, t_repsFrom, s_rep, s_dsa, cn_conn) + + # Loop thru connections and add implied repsFrom tuples + # for each NTDSConnection under our local DSA if the + # repsFrom is not already present + for cn_conn in current_dsa.connect_table.values(): + + s_dsa = self.get_dsa_for_implied_replica(n_rep, cn_conn) + if s_dsa is None: + continue + + # Loop thru the existing repsFrom tuples (if any) and + # if we already have a tuple for this connection then + # no need to proceed to add. 
It will have been changed + # to have the correct attributes above + for t_repsFrom in n_rep.rep_repsFrom: + guidstr = str(t_repsFrom.source_dsa_obj_guid) + if s_dsa is self.get_dsa_by_guidstr(guidstr): + s_dsa = None + break + + if s_dsa is None: + continue + + # Create a new RepsFromTo and proceed to modify + # it according to specification + t_repsFrom = RepsFromTo(n_rep.nc_dnstr) + + t_repsFrom.source_dsa_obj_guid = s_dsa.dsa_guid + + s_rep = s_dsa.get_current_replica(n_rep.nc_dnstr) + + self.modify_repsFrom(n_rep, t_repsFrom, s_rep, s_dsa, cn_conn) + + # Add to our NC repsFrom as this is newly computed + if t_repsFrom.is_modified(): + n_rep.rep_repsFrom.append(t_repsFrom) + + if self.readonly or ro: + # Display any to be deleted or modified repsFrom + text = n_rep.dumpstr_to_be_deleted() + if text: + logger.info("TO BE DELETED:\n%s" % text) + text = n_rep.dumpstr_to_be_modified() + if text: + logger.info("TO BE MODIFIED:\n%s" % text) + + # Perform deletion from our tables but perform + # no database modification + n_rep.commit_repsFrom(self.samdb, ro=True) + else: + # Commit any modified repsFrom to the NC replica + n_rep.commit_repsFrom(self.samdb) + + # HANDLE REPS-TO: + # + # Now perform the scan of replicas we'll need + # and compare any current repsTo against the + # connections + + # RODC should never push to anybody (should we check this?) + if ro: + return + + for n_rep in needed_rep_table.values(): + + # load any repsTo and fsmo roles as we'll + # need them during connection translation + n_rep.load_repsTo(self.samdb) + + # Loop thru the existing repsTo tuples (if any) + # XXX This is a list and could contain duplicates + # (multiple load_repsTo calls) + for t_repsTo in n_rep.rep_repsTo: + + # for each tuple t in n!repsTo, let s be the nTDSDSA + # object such that s!objectGUID = t.uuidDsa + guidstr = str(t_repsTo.source_dsa_obj_guid) + s_dsa = self.get_dsa_by_guidstr(guidstr) + + # Source dsa is gone from config (strange) + # so cleanup stale repsTo for unlisted DSA + if s_dsa is None: + logger.warning("repsTo source DSA guid (%s) not found" % + guidstr) + t_repsTo.to_be_deleted = True + continue + + # Find the connection that this repsTo would use. If + # there isn't a good one (i.e. non-RODC_TOPOLOGY, + # meaning non-FRS), we delete the repsTo. + s_dnstr = s_dsa.dsa_dnstr + if '\\0ADEL' in s_dnstr: + logger.warning("repsTo source DSA guid (%s) appears deleted" % + guidstr) + t_repsTo.to_be_deleted = True + continue + + connections = s_dsa.get_connection_by_from_dnstr(self.my_dsa_dnstr) + if len(connections) > 0: + # Then this repsTo is tentatively valid + continue + else: + # There is no plausible connection for this repsTo + t_repsTo.to_be_deleted = True + + if self.readonly: + # Display any to be deleted or modified repsTo + for rt in n_rep.rep_repsTo: + if rt.to_be_deleted: + logger.info("REMOVING REPS-TO: %s" % rt) + + # Perform deletion from our tables but perform + # no database modification + n_rep.commit_repsTo(self.samdb, ro=True) + else: + # Commit any modified repsTo to the NC replica + n_rep.commit_repsTo(self.samdb) + + # TODO Remove any duplicate repsTo values. This should never happen in + # any normal situations. + + def merge_failed_links(self, ping=None): + """Merge of kCCFailedLinks and kCCFailedLinks from bridgeheads. + + The KCC on a writable DC attempts to merge the link and connection + failure information from bridgehead DCs in its own site to help it + identify failed bridgehead DCs. 
+
+        Based on MS-ADTS 6.2.2.3.2 "Merge of kCCFailedLinks and kCCFailedLinks
+        from Bridgeheads"
+
+        :param ping: An oracle of current bridgehead availability
+        :return: None
+        """
+        # 1. Queries every bridgehead server in your site (other than yourself)
+        # 2. For every ntDSConnection that references a server in a different
+        #    site merge all the failure info
+        #
+        # XXX - not implemented yet
+        if ping is not None:
+            debug.DEBUG_RED("merge_failed_links() is NOT IMPLEMENTED")
+        else:
+            DEBUG_FN("skipping merge_failed_links() because it requires "
+                     "real network connections\n"
+                     "and we weren't asked to --attempt-live-connections")
+
+    def setup_graph(self, part):
+        """Set up an intersite graph
+
+        An intersite graph has a Vertex for each site object, a
+        MultiEdge for each SiteLink object, and a MultiEdgeSet for
+        each siteLinkBridge object (or implied siteLinkBridge). It
+        reflects the intersite topology in a slightly more abstract
+        graph form.
+
+        Roughly corresponds to MS-ADTS 6.2.2.3.4.3
+
+        :param part: a Partition object
+        :returns: an InterSiteGraph object
+        """
+        # If 'Bridge all site links' is enabled and Win2k3 bridges required
+        # is not set
+        # NTDSTRANSPORT_OPT_BRIDGES_REQUIRED 0x00000002
+        # No documentation for this however, ntdsapi.h appears to have:
+        # NTDSSETTINGS_OPT_W2K3_BRIDGES_REQUIRED = 0x00001000
+        bridges_required = self.my_site.site_options & 0x00001002 != 0
+        transport_guid = str(self.ip_transport.guid)
+
+        g = setup_graph(part, self.site_table, transport_guid,
+                        self.sitelink_table, bridges_required)
+
+        if self.verify or self.dot_file_dir is not None:
+            dot_edges = []
+            for edge in g.edges:
+                for a, b in itertools.combinations(edge.vertices, 2):
+                    dot_edges.append((a.site.site_dnstr, b.site.site_dnstr))
+            verify_properties = ()
+            name = 'site_edges_%s' % part.partstr
+            verify_and_dot(name, dot_edges, directed=False,
+                           label=self.my_dsa_dnstr,
+                           properties=verify_properties, debug=DEBUG,
+                           verify=self.verify,
+                           dot_file_dir=self.dot_file_dir)
+
+        return g
+
+    def get_bridgehead(self, site, part, transport, partial_ok, detect_failed):
+        """Get a bridgehead DC for a site.
+
+        Part of MS-ADTS 6.2.2.3.4.4
+
+        :param site: site object representing the site for which a
+            bridgehead DC is desired.
+        :param part: crossRef for NC to replicate.
+        :param transport: interSiteTransport object for replication
+            traffic.
+        :param partial_ok: True if a DC containing a partial
+            replica or a full replica will suffice, False if only
+            a full replica will suffice.
+        :param detect_failed: True to detect failed DCs and route
+            replication traffic around them, False to assume no DC
+            has failed.
+        :return: dsa object for the bridgehead DC or None
+        """
+
+        bhs = self.get_all_bridgeheads(site, part, transport,
+                                       partial_ok, detect_failed)
+        if not bhs:
+            debug.DEBUG_MAGENTA("get_bridgehead FAILED:\nsitedn = %s" %
+                                site.site_dnstr)
+            return None
+
+        debug.DEBUG_GREEN("get_bridgehead:\n\tsitedn = %s\n\tbhdn = %s" %
+                          (site.site_dnstr, bhs[0].dsa_dnstr))
+        return bhs[0]
+
+    def get_all_bridgeheads(self, site, part, transport,
+                            partial_ok, detect_failed):
+        """Get all bridgehead DCs on a site satisfying the given criteria
+
+        Part of MS-ADTS 6.2.2.3.4.4
+
+        :param site: site object representing the site for which
+            bridgehead DCs are desired.
+        :param part: partition for NC to replicate.
+        :param transport: interSiteTransport object for
+            replication traffic.
+ :param partial_ok: True if a DC containing a partial + replica or a full replica will suffice, False if + only a full replica will suffice. + :param detect_failed: True to detect failed DCs and route + replication traffic around them, FALSE to assume + no DC has failed. + :return: list of dsa object for available bridgehead DCs + """ + bhs = [] + + if transport.name != "IP": + raise KCCError("get_all_bridgeheads has run into a " + "non-IP transport! %r" + % (transport.name,)) + + DEBUG_FN(site.rw_dsa_table) + for dsa in site.rw_dsa_table.values(): + + pdnstr = dsa.get_parent_dnstr() + + # IF t!bridgeheadServerListBL has one or more values and + # t!bridgeheadServerListBL does not contain a reference + # to the parent object of dc then skip dc + if ((len(transport.bridgehead_list) != 0 and + pdnstr not in transport.bridgehead_list)): + continue + + # IF dc is in the same site as the local DC + # IF a replica of cr!nCName is not in the set of NC replicas + # that "should be present" on dc or a partial replica of the + # NC "should be present" but partialReplicasOkay = FALSE + # Skip dc + if self.my_site.same_site(dsa): + needed, ro, partial = part.should_be_present(dsa) + if not needed or (partial and not partial_ok): + continue + rep = dsa.get_current_replica(part.nc_dnstr) + + # ELSE + # IF an NC replica of cr!nCName is not in the set of NC + # replicas that "are present" on dc or a partial replica of + # the NC "is present" but partialReplicasOkay = FALSE + # Skip dc + else: + rep = dsa.get_current_replica(part.nc_dnstr) + if rep is None or (rep.is_partial() and not partial_ok): + continue + + # IF AmIRODC() and cr!nCName corresponds to default NC then + # Let dsaobj be the nTDSDSA object of the dc + # IF dsaobj.msDS-Behavior-Version < DS_DOMAIN_FUNCTION_2008 + # Skip dc + if self.my_dsa.is_ro() and rep is not None and rep.is_default(): + if not dsa.is_minimum_behavior(dsdb.DS_DOMAIN_FUNCTION_2008): + continue + + # IF BridgeheadDCFailed(dc!objectGUID, detectFailedDCs) = TRUE + # Skip dc + if self.is_bridgehead_failed(dsa, detect_failed): + DEBUG("bridgehead is failed") + continue + + DEBUG_FN("found a bridgehead: %s" % dsa.dsa_dnstr) + bhs.append(dsa) + + # IF bit NTDSSETTINGS_OPT_IS_RAND_BH_SELECTION_DISABLED is set in + # s!options + # SORT bhs such that all GC servers precede DCs that are not GC + # servers, and otherwise by ascending objectGUID + # ELSE + # SORT bhs in a random order + if site.is_random_bridgehead_disabled(): + bhs.sort(key=cmp_to_key(sort_dsa_by_gc_and_guid)) + else: + random.shuffle(bhs) + debug.DEBUG_YELLOW(bhs) + return bhs + + def is_bridgehead_failed(self, dsa, detect_failed): + """Determine whether a given DC is known to be in a failed state + + :param dsa: the bridgehead to test + :param detect_failed: True to really check, False to assume no failure + :return: True if and only if the DC should be considered failed + + Here we DEPART from the pseudo code spec which appears to be + wrong. It says, in full: + + /***** BridgeheadDCFailed *****/ + /* Determine whether a given DC is known to be in a failed state. + * IN: objectGUID - objectGUID of the DC's nTDSDSA object. + * IN: detectFailedDCs - TRUE if and only failed DC detection is + * enabled. + * RETURNS: TRUE if and only if the DC should be considered to be in a + * failed state. 
+         */
+        BridgeheadDCFailed(IN GUID objectGUID, IN bool detectFailedDCs) : bool
+        {
+            IF bit NTDSSETTINGS_OPT_IS_TOPL_DETECT_STALE_DISABLED is set in
+            the options attribute of the site settings object for the local
+            DC's site
+                RETURN FALSE
+            ELSEIF a tuple z exists in the kCCFailedLinks or
+            kCCFailedConnections variables such that z.UUIDDsa =
+            objectGUID, z.FailureCount > 1, and the current time -
+            z.TimeFirstFailure > 2 hours
+                RETURN TRUE
+            ELSE
+                RETURN detectFailedDCs
+            ENDIF
+        }
+
+        where you will see detectFailedDCs is not behaving as
+        advertised -- it is acting as a default return code in the
+        event that a failure is not detected, not a switch turning
+        detection on or off. Elsewhere the documentation seems to
+        concur with the comment rather than the code.
+        """
+        if not detect_failed:
+            return False
+
+        # NTDSSETTINGS_OPT_IS_TOPL_DETECT_STALE_DISABLED = 0x00000008
+        # When DETECT_STALE_DISABLED, we can never know if
+        # it's in a failed state
+        if self.my_site.site_options & 0x00000008:
+            return False
+
+        return self.is_stale_link_connection(dsa)
+
+    def create_connection(self, part, rbh, rsite, transport,
+                          lbh, lsite, link_opt, link_sched,
+                          partial_ok, detect_failed):
+        """Create an nTDSConnection object as specified if it doesn't exist.
+
+        Part of MS-ADTS 6.2.2.3.4.5
+
+        :param part: crossRef object for the NC to replicate.
+        :param rbh: nTDSDSA object for DC to act as the
+            IDL_DRSGetNCChanges server (which is in a site other
+            than the local DC's site).
+        :param rsite: site of the rbh
+        :param transport: interSiteTransport object for the transport
+            to use for replication traffic.
+        :param lbh: nTDSDSA object for DC to act as the
+            IDL_DRSGetNCChanges client (which is in the local DC's site).
+        :param lsite: site of the lbh
+        :param link_opt: Replication parameters (aggregated siteLink options,
+            etc.)
+        :param link_sched: Schedule specifying the times at which
+            to begin replicating.
+        :param partial_ok: True if bridgehead DCs containing partial
+            replicas of the NC are acceptable.
+        :param detect_failed: True to detect failed DCs and route
+            replication traffic around them, FALSE to assume no DC
+            has failed.
+        """
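+        # Outline of this method (descriptive only): collect candidate
+        # bridgeheads on both sides, bring the options/schedule of any
+        # existing generated connection between them into line with the
+        # siteLink, count the connections whose endpoints are not failed,
+        # and only if that count is zero create a new nTDSConnection
+        # under lbh.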
+ """ + rbhs_all = self.get_all_bridgeheads(rsite, part, transport, + partial_ok, False) + rbh_table = dict((x.dsa_dnstr, x) for x in rbhs_all) + + debug.DEBUG_GREY("rbhs_all: %s %s" % (len(rbhs_all), + [x.dsa_dnstr for x in rbhs_all])) + + # MS-TECH says to compute rbhs_avail but then doesn't use it + # rbhs_avail = self.get_all_bridgeheads(rsite, part, transport, + # partial_ok, detect_failed) + + lbhs_all = self.get_all_bridgeheads(lsite, part, transport, + partial_ok, False) + if lbh.is_ro(): + lbhs_all.append(lbh) + + debug.DEBUG_GREY("lbhs_all: %s %s" % (len(lbhs_all), + [x.dsa_dnstr for x in lbhs_all])) + + # MS-TECH says to compute lbhs_avail but then doesn't use it + # lbhs_avail = self.get_all_bridgeheads(lsite, part, transport, + # partial_ok, detect_failed) + + # FOR each nTDSConnection object cn such that the parent of cn is + # a DC in lbhsAll and cn!fromServer references a DC in rbhsAll + for ldsa in lbhs_all: + for cn in ldsa.connect_table.values(): + + rdsa = rbh_table.get(cn.from_dnstr) + if rdsa is None: + continue + + debug.DEBUG_DARK_YELLOW("rdsa is %s" % rdsa.dsa_dnstr) + # IF bit NTDSCONN_OPT_IS_GENERATED is set in cn!options and + # NTDSCONN_OPT_RODC_TOPOLOGY is clear in cn!options and + # cn!transportType references t + if ((cn.is_generated() and + not cn.is_rodc_topology() and + cn.transport_guid == transport.guid)): + + # IF bit NTDSCONN_OPT_USER_OWNED_SCHEDULE is clear in + # cn!options and cn!schedule != sch + # Perform an originating update to set cn!schedule to + # sched + if ((not cn.is_user_owned_schedule() and + not cn.is_equivalent_schedule(link_sched))): + cn.schedule = link_sched + cn.set_modified(True) + + # IF bits NTDSCONN_OPT_OVERRIDE_NOTIFY_DEFAULT and + # NTDSCONN_OPT_USE_NOTIFY are set in cn + if cn.is_override_notify_default() and \ + cn.is_use_notify(): + + # IF bit NTDSSITELINK_OPT_USE_NOTIFY is clear in + # ri.Options + # Perform an originating update to clear bits + # NTDSCONN_OPT_OVERRIDE_NOTIFY_DEFAULT and + # NTDSCONN_OPT_USE_NOTIFY in cn!options + if (link_opt & dsdb.NTDSSITELINK_OPT_USE_NOTIFY) == 0: + cn.options &= \ + ~(dsdb.NTDSCONN_OPT_OVERRIDE_NOTIFY_DEFAULT | + dsdb.NTDSCONN_OPT_USE_NOTIFY) + cn.set_modified(True) + + # ELSE + else: + + # IF bit NTDSSITELINK_OPT_USE_NOTIFY is set in + # ri.Options + # Perform an originating update to set bits + # NTDSCONN_OPT_OVERRIDE_NOTIFY_DEFAULT and + # NTDSCONN_OPT_USE_NOTIFY in cn!options + if (link_opt & dsdb.NTDSSITELINK_OPT_USE_NOTIFY) != 0: + cn.options |= \ + (dsdb.NTDSCONN_OPT_OVERRIDE_NOTIFY_DEFAULT | + dsdb.NTDSCONN_OPT_USE_NOTIFY) + cn.set_modified(True) + + # IF bit NTDSCONN_OPT_TWOWAY_SYNC is set in cn!options + if cn.is_twoway_sync(): + + # IF bit NTDSSITELINK_OPT_TWOWAY_SYNC is clear in + # ri.Options + # Perform an originating update to clear bit + # NTDSCONN_OPT_TWOWAY_SYNC in cn!options + if (link_opt & dsdb.NTDSSITELINK_OPT_TWOWAY_SYNC) == 0: + cn.options &= ~dsdb.NTDSCONN_OPT_TWOWAY_SYNC + cn.set_modified(True) + + # ELSE + else: + + # IF bit NTDSSITELINK_OPT_TWOWAY_SYNC is set in + # ri.Options + # Perform an originating update to set bit + # NTDSCONN_OPT_TWOWAY_SYNC in cn!options + if (link_opt & dsdb.NTDSSITELINK_OPT_TWOWAY_SYNC) != 0: + cn.options |= dsdb.NTDSCONN_OPT_TWOWAY_SYNC + cn.set_modified(True) + + # IF bit NTDSCONN_OPT_DISABLE_INTERSITE_COMPRESSION is set + # in cn!options + if cn.is_intersite_compression_disabled(): + + # IF bit NTDSSITELINK_OPT_DISABLE_COMPRESSION is clear + # in ri.Options + # Perform an originating update to clear bit + # 
NTDSCONN_OPT_DISABLE_INTERSITE_COMPRESSION in + # cn!options + if ((link_opt & + dsdb.NTDSSITELINK_OPT_DISABLE_COMPRESSION) == 0): + cn.options &= \ + ~dsdb.NTDSCONN_OPT_DISABLE_INTERSITE_COMPRESSION + cn.set_modified(True) + + # ELSE + else: + # IF bit NTDSSITELINK_OPT_DISABLE_COMPRESSION is set in + # ri.Options + # Perform an originating update to set bit + # NTDSCONN_OPT_DISABLE_INTERSITE_COMPRESSION in + # cn!options + if ((link_opt & + dsdb.NTDSSITELINK_OPT_DISABLE_COMPRESSION) != 0): + cn.options |= \ + dsdb.NTDSCONN_OPT_DISABLE_INTERSITE_COMPRESSION + cn.set_modified(True) + + # Display any modified connection + if self.readonly or ldsa.is_ro(): + if cn.to_be_modified: + logger.info("TO BE MODIFIED:\n%s" % cn) + + ldsa.commit_connections(self.samdb, ro=True) + else: + ldsa.commit_connections(self.samdb) + # ENDFOR + + valid_connections = 0 + + # FOR each nTDSConnection object cn such that cn!parent is + # a DC in lbhsAll and cn!fromServer references a DC in rbhsAll + for ldsa in lbhs_all: + for cn in ldsa.connect_table.values(): + + rdsa = rbh_table.get(cn.from_dnstr) + if rdsa is None: + continue + + debug.DEBUG_DARK_YELLOW("round 2: rdsa is %s" % rdsa.dsa_dnstr) + + # IF (bit NTDSCONN_OPT_IS_GENERATED is clear in cn!options or + # cn!transportType references t) and + # NTDSCONN_OPT_RODC_TOPOLOGY is clear in cn!options + if (((not cn.is_generated() or + cn.transport_guid == transport.guid) and + not cn.is_rodc_topology())): + + # LET rguid be the objectGUID of the nTDSDSA object + # referenced by cn!fromServer + # LET lguid be (cn!parent)!objectGUID + + # IF BridgeheadDCFailed(rguid, detectFailedDCs) = FALSE and + # BridgeheadDCFailed(lguid, detectFailedDCs) = FALSE + # Increment cValidConnections by 1 + if ((not self.is_bridgehead_failed(rdsa, detect_failed) and + not self.is_bridgehead_failed(ldsa, detect_failed))): + valid_connections += 1 + + # IF keepConnections does not contain cn!objectGUID + # APPEND cn!objectGUID to keepConnections + self.kept_connections.add(cn) + + # ENDFOR + debug.DEBUG_RED("valid connections %d" % valid_connections) + DEBUG("kept_connections:\n%s" % (self.kept_connections,)) + # IF cValidConnections = 0 + if valid_connections == 0: + + # LET opt be NTDSCONN_OPT_IS_GENERATED + opt = dsdb.NTDSCONN_OPT_IS_GENERATED + + # IF bit NTDSSITELINK_OPT_USE_NOTIFY is set in ri.Options + # SET bits NTDSCONN_OPT_OVERRIDE_NOTIFY_DEFAULT and + # NTDSCONN_OPT_USE_NOTIFY in opt + if (link_opt & dsdb.NTDSSITELINK_OPT_USE_NOTIFY) != 0: + opt |= (dsdb.NTDSCONN_OPT_OVERRIDE_NOTIFY_DEFAULT | + dsdb.NTDSCONN_OPT_USE_NOTIFY) + + # IF bit NTDSSITELINK_OPT_TWOWAY_SYNC is set in ri.Options + # SET bit NTDSCONN_OPT_TWOWAY_SYNC opt + if (link_opt & dsdb.NTDSSITELINK_OPT_TWOWAY_SYNC) != 0: + opt |= dsdb.NTDSCONN_OPT_TWOWAY_SYNC + + # IF bit NTDSSITELINK_OPT_DISABLE_COMPRESSION is set in + # ri.Options + # SET bit NTDSCONN_OPT_DISABLE_INTERSITE_COMPRESSION in opt + if ((link_opt & + dsdb.NTDSSITELINK_OPT_DISABLE_COMPRESSION) != 0): + opt |= dsdb.NTDSCONN_OPT_DISABLE_INTERSITE_COMPRESSION + + # Perform an originating update to create a new nTDSConnection + # object cn that is a child of lbh, cn!enabledConnection = TRUE, + # cn!options = opt, cn!transportType is a reference to t, + # cn!fromServer is a reference to rbh, and cn!schedule = sch + DEBUG_FN("new connection, KCC dsa: %s" % self.my_dsa.dsa_dnstr) + system_flags = (dsdb.SYSTEM_FLAG_CONFIG_ALLOW_RENAME | + dsdb.SYSTEM_FLAG_CONFIG_ALLOW_MOVE) + + cn = lbh.new_connection(opt, system_flags, transport, + rbh.dsa_dnstr, link_sched) 
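+
+            # For illustration only (a sketch of the mapping above, not
+            # executed): a siteLink with both NTDSSITELINK_OPT_USE_NOTIFY
+            # and NTDSSITELINK_OPT_TWOWAY_SYNC set in link_opt arrives
+            # here with
+            #
+            #     opt == (dsdb.NTDSCONN_OPT_IS_GENERATED |
+            #             dsdb.NTDSCONN_OPT_OVERRIDE_NOTIFY_DEFAULT |
+            #             dsdb.NTDSCONN_OPT_USE_NOTIFY |
+            #             dsdb.NTDSCONN_OPT_TWOWAY_SYNC)
+            #
+            # which the new nTDSConnection cn then carries in cn!options.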
+
+            # Display any added connection
+            if self.readonly or lbh.is_ro():
+                if cn.to_be_added:
+                    logger.info("TO BE ADDED:\n%s" % cn)
+
+                lbh.commit_connections(self.samdb, ro=True)
+            else:
+                lbh.commit_connections(self.samdb)
+
+            # APPEND cn!objectGUID to keepConnections
+            self.kept_connections.add(cn)
+
+    def add_transports(self, vertex, local_vertex, graph, detect_failed):
+        """Build a Vertex's transport lists
+
+        Each vertex has accept_red_red and accept_black lists that
+        record which transports it accepts under various conditions.
+        The only transports that are ever accepted are IP and a dummy
+        extra transport called "EDGE_TYPE_ALL".
+
+        Part of MS-ADTS 6.2.2.3.4.3 -- ColorVertices
+
+        :param vertex: the remote vertex we are thinking about
+        :param local_vertex: the vertex relating to the local site.
+        :param graph: the intersite graph
+        :param detect_failed: whether to detect failed links
+        :return: True if some bridgeheads were not found
+        """
+        # The docs ([MS-ADTS] 6.2.2.3.4.3) say to use local_vertex
+        # here, but using vertex seems to make more sense. That is,
+        # the docs want this:
+        #
+        # bh = self.get_bridgehead(local_vertex.site, vertex.part, transport,
+        #                          local_vertex.is_black(), detect_failed)
+        #
+        # TODO WHY?????
+
+        vertex.accept_red_red = []
+        vertex.accept_black = []
+        found_failed = False
+
+        if vertex in graph.connected_vertices:
+            t_guid = str(self.ip_transport.guid)
+
+            bh = self.get_bridgehead(vertex.site, vertex.part,
+                                     self.ip_transport,
+                                     vertex.is_black(), detect_failed)
+            if bh is None:
+                if vertex.site.is_rodc_site():
+                    vertex.accept_red_red.append(t_guid)
+                else:
+                    found_failed = True
+            else:
+                vertex.accept_red_red.append(t_guid)
+                vertex.accept_black.append(t_guid)
+
+            # Add additional transport to ensure another run of Dijkstra
+            vertex.accept_red_red.append("EDGE_TYPE_ALL")
+            vertex.accept_black.append("EDGE_TYPE_ALL")
+
+        return found_failed
+
+    def create_connections(self, graph, part, detect_failed):
+        """Create intersite NTDSConnections as needed by a partition
+
+        Construct an NC replica graph for the NC identified by
+        the given crossRef, then create any additional nTDSConnection
+        objects required.
+
+        :param graph: site graph.
+        :param part: crossRef object for NC.
+        :param detect_failed: True to detect failed DCs and route
+            replication traffic around them, False to assume no DC
+            has failed.
+
+        Modifies self.kept_connections by adding any connections
+        deemed to be "in use".
+
+        :return: (all_connected, found_failed_dc)
+            (all_connected) True if the resulting NC replica graph
+            connects all sites that need to be connected.
+            (found_failed_dc) True if one or more failed DCs were
+            detected.
+        """
+        all_connected = True
+        found_failed = False
+
+        DEBUG_FN("create_connections(): enter\n"
+                 "\tpartdn=%s\n\tdetect_failed=%s" %
+                 (part.nc_dnstr, detect_failed))
+
+        # XXX - This is a highly abbreviated function from the MS-TECH
+        #       ref. It creates connections between bridgeheads to all
+        #       sites that have appropriate replicas. Thus we are not
+        #       creating a minimum cost spanning tree but instead
+        #       producing a fully connected tree. This should produce
+        #       a full (albeit not optimal cost) replication topology.
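+
+        # To illustrate (a sketch, not from the spec): for three sites
+        # A, B and C that all host replicas of the NC, a minimum-cost
+        # spanning tree might create connections only along A-B and
+        # B-C, whereas the fully connected approach here will also
+        # consider A-C.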
+
+        my_vertex = Vertex(self.my_site, part)
+        my_vertex.color_vertex()
+
+        for v in graph.vertices:
+            v.color_vertex()
+            if self.add_transports(v, my_vertex, graph, detect_failed):
+                found_failed = True
+
+        # No NC replicas for this NC in the site of the local DC,
+        # so no nTDSConnection objects need be created
+        if my_vertex.is_white():
+            return all_connected, found_failed
+
+        edge_list, n_components = get_spanning_tree_edges(graph,
+                                                          self.my_site,
+                                                          label=part.partstr)
+
+        DEBUG_FN("%s Number of components: %d" %
+                 (part.nc_dnstr, n_components))
+        if n_components > 1:
+            all_connected = False
+
+        # LET partialReplicaOkay be TRUE if and only if
+        # localSiteVertex.Color = COLOR.BLACK
+        partial_ok = my_vertex.is_black()
+
+        # Utilize the IP transport only for now
+        transport = self.ip_transport
+
+        DEBUG("edge_list %s" % edge_list)
+        for e in edge_list:
+            # XXX more accurate comparison?
+            if e.directed and e.vertices[0].site is self.my_site:
+                continue
+
+            if e.vertices[0].site is self.my_site:
+                rsite = e.vertices[1].site
+            else:
+                rsite = e.vertices[0].site
+
+            # We don't make connections to our own site as that
+            # is the intrasite topology generator's job
+            if rsite is self.my_site:
+                DEBUG("rsite is my_site")
+                continue
+
+            # Determine bridgehead server in remote site
+            rbh = self.get_bridgehead(rsite, part, transport,
+                                      partial_ok, detect_failed)
+            if rbh is None:
+                continue
+
+            # An RODC acts as a BH for itself
+            # IF AmIRODC() then
+            #     LET lbh be the nTDSDSA object of the local DC
+            # ELSE
+            #     LET lbh be the result of GetBridgeheadDC(localSiteVertex.ID,
+            #     cr, t, partialReplicaOkay, detectFailedDCs)
+            if self.my_dsa.is_ro():
+                lsite = self.my_site
+                lbh = self.my_dsa
+            else:
+                lsite = self.my_site
+                lbh = self.get_bridgehead(lsite, part, transport,
+                                          partial_ok, detect_failed)
+            # TODO
+            if lbh is None:
+                debug.DEBUG_RED("DISASTER! lbh is None")
+                return False, True
+
+            DEBUG_FN("lsite: %s\nrsite: %s" % (lsite, rsite))
+            DEBUG_FN("vertices %s" % (e.vertices,))
+            debug.DEBUG_BLUE("bridgeheads\n%s\n%s\n%s" % (lbh, rbh, "-" * 70))
+
+            sitelink = e.site_link
+            if sitelink is None:
+                link_opt = 0x0
+                link_sched = None
+            else:
+                link_opt = sitelink.options
+                link_sched = sitelink.schedule
+
+            self.create_connection(part, rbh, rsite, transport,
+                                   lbh, lsite, link_opt, link_sched,
+                                   partial_ok, detect_failed)
+
+        return all_connected, found_failed
+
+    def create_intersite_connections(self):
+        """Create NTDSConnections as necessary for all partitions.
+
+        Computes an NC replica graph for each NC replica that "should be
+        present" on the local DC or "is present" on any DC in the same site
+        as the local DC. For each edge directed to an NC replica on such a
+        DC from an NC replica on a DC in another site, the KCC creates an
+        nTDSConnection object to imply that edge if one does not already
+        exist.
+
+        Modifies self.kept_connections - a set of nTDSConnection
+        objects for edges that are directed to the local DC's site
+        in one or more NC replica graphs.
+
+        :return: True if spanning trees were created for all NC replica
+            graphs, otherwise False.
+        """
+        all_connected = True
+        self.kept_connections = set()
+
+        # LET crossRefList be the set containing each object o of class
+        # crossRef such that o is a child of the CN=Partitions child of the
+        # config NC
+
+        # FOR each crossRef object cr in crossRefList
+        #     IF cr!enabled has a value and is false, or if FLAG_CR_NTDS_NC
+        #     is clear in cr!systemFlags, skip cr.
+ # LET g be the GRAPH return of SetupGraph() + + for part in self.part_table.values(): + + if not part.is_enabled(): + continue + + if part.is_foreign(): + continue + + graph = self.setup_graph(part) + + # Create nTDSConnection objects, routing replication traffic + # around "failed" DCs. + found_failed = False + + connected, found_failed = self.create_connections(graph, + part, True) + + DEBUG("with detect_failed: connected %s Found failed %s" % + (connected, found_failed)) + if not connected: + all_connected = False + + if found_failed: + # One or more failed DCs preclude use of the ideal NC + # replica graph. Add connections for the ideal graph. + self.create_connections(graph, part, False) + + return all_connected + + def intersite(self, ping): + """Generate the inter-site KCC replica graph and nTDSConnections + + As per MS-ADTS 6.2.2.3. + + If self.readonly is False, the connections are added to self.samdb. + + Produces self.kept_connections which is a set of NTDS + Connections that should be kept during subsequent pruning + process. + + After this has run, all sites should be connected in a minimum + spanning tree. + + :param ping: An oracle function of remote site availability + :return (True or False): (True) if the produced NC replica + graph connects all sites that need to be connected + """ + + # Retrieve my DSA + mydsa = self.my_dsa + mysite = self.my_site + all_connected = True + + DEBUG_FN("intersite(): enter") + + # Determine who is the ISTG + if self.readonly: + mysite.select_istg(self.samdb, mydsa, ro=True) + else: + mysite.select_istg(self.samdb, mydsa, ro=False) + + # Test whether local site has topology disabled + if mysite.is_intersite_topology_disabled(): + DEBUG_FN("intersite(): exit disabled all_connected=%d" % + all_connected) + return all_connected + + if not mydsa.is_istg(): + DEBUG_FN("intersite(): exit not istg all_connected=%d" % + all_connected) + return all_connected + + self.merge_failed_links(ping) + + # For each NC with an NC replica that "should be present" on the + # local DC or "is present" on any DC in the same site as the + # local DC, the KCC constructs a site graph--a precursor to an NC + # replica graph. The site connectivity for a site graph is defined + # by objects of class interSiteTransport, siteLink, and + # siteLinkBridge in the config NC. + + all_connected = self.create_intersite_connections() + + DEBUG_FN("intersite(): exit all_connected=%d" % all_connected) + return all_connected + + # This function currently does no actions. The reason being that we cannot + # perform modifies in this way on the RODC. + def update_rodc_connection(self, ro=True): + """Updates the RODC NTFRS connection object. + + If the local DSA is not an RODC, this does nothing. + """ + if not self.my_dsa.is_ro(): + return + + # Given an nTDSConnection object cn1, such that cn1.options contains + # NTDSCONN_OPT_RODC_TOPOLOGY, and another nTDSConnection object cn2, + # does not contain NTDSCONN_OPT_RODC_TOPOLOGY, modify cn1 to ensure + # that the following is true: + # + # cn1.fromServer = cn2.fromServer + # cn1.schedule = cn2.schedule + # + # If no such cn2 can be found, cn1 is not modified. + # If no such cn1 can be found, nothing is modified by this task. + + all_connections = self.my_dsa.connect_table.values() + ro_connections = [x for x in all_connections if x.is_rodc_topology()] + rw_connections = [x for x in all_connections + if x not in ro_connections] + + # XXX here we are dealing with multiple RODC_TOPO connections, + # if they exist. 
It is not clear whether the spec means that
+        # or if it ever arises.
+        if rw_connections and ro_connections:
+            for con in ro_connections:
+                cn2 = rw_connections[0]
+                con.from_dnstr = cn2.from_dnstr
+                con.schedule = cn2.schedule
+                con.to_be_modified = True
+
+            self.my_dsa.commit_connections(self.samdb, ro=ro)
+
+    def intrasite_max_node_edges(self, node_count):
+        """Find the maximum number of edges directed to an intrasite node
+
+        The KCC does not create more than 50 edges directed to a
+        single DC. To optimize replication, we compute that each node
+        should have n+2 total edges directed to it such that (n) is
+        the smallest non-negative integer satisfying
+        (node_count <= 2*(n*n) + 6*n + 7).
+
+        (If the number of edges is m (i.e. n + 2), that is the same as
+        node_count <= 2*m*m - 2*m + 3.) We think in terms of n because
+        that is the number of extra connections over the double
+        directed ring that exists by default.
+
+        edges   n   nodecount
+          2     0       7
+          3     1      15
+          4     2      27
+          5     3      43
+         ...
+         50    48    4903
+
+        The intention is that there should be no more than 3 hops
+        between any two DSAs at a site. With up to 7 nodes the 2 edges
+        of the ring are enough; any configuration of extra edges with
+        8 nodes will be enough. It is less clear that the 3 hop
+        guarantee holds at e.g. 15 nodes in degenerate cases, but
+        those are quite unlikely given the extra edges are randomly
+        arranged.
+
+        :param node_count: the number of nodes in the replica graph
+        :return: the desired maximum number of connections
+        """
+        n = 0
+        while True:
+            if node_count <= (2 * (n * n) + (6 * n) + 7):
+                break
+            n = n + 1
+        n = n + 2
+        if n < 50:
+            return n
+        return 50
+
+    def construct_intrasite_graph(self, site_local, dc_local,
+                                  nc_x, gc_only, detect_stale):
+        """Create an intrasite graph using given parameters
+
+        This might be called a number of times per site with different
+        parameters.
+
+        Based on [MS-ADTS] 6.2.2.2
+
+        :param site_local: site for which we are working
+        :param dc_local: local DC that potentially needs a replica
+        :param nc_x: naming context (x) that we are testing if it
+            "should be present" on the local DC
+        :param gc_only: Boolean - only consider global catalog servers
+        :param detect_stale: Boolean - check whether links seem down
+        :return: None
+        """
+        # We're using the MS notation names here to allow
+        # correlation back to the published algorithm.
+        #
+        # nc_x - naming context (x) that we are testing if it
+        #        "should be present" on the local DC
+        # f_of_x - replica (f) found on a DC (s) for NC (x)
+        # dc_s - DC where f_of_x replica was found
+        # dc_local - local DC that potentially needs a replica
+        #            (f_of_x)
+        # r_list - replica list R
+        # p_of_x - replica (p) is partial and found on a DC (s)
+        #          for NC (x)
+        # l_of_x - replica (l) is the local replica for NC (x)
+        #          that should appear on the local DC
+        # r_len - length of replica list |R|
+        #
+        # If the DSA doesn't need a replica for this
+        # partition (NC x) then continue
+        needed, ro, partial = nc_x.should_be_present(dc_local)
+
+        debug.DEBUG_YELLOW("construct_intrasite_graph(): enter" +
+                           "\n\tgc_only=%d" % gc_only +
+                           "\n\tdetect_stale=%d" % detect_stale +
+                           "\n\tneeded=%s" % needed +
+                           "\n\tro=%s" % ro +
+                           "\n\tpartial=%s" % partial +
+                           "\n%s" % nc_x)
+
+        if not needed:
+            debug.DEBUG_RED("%s lacks 'should be present' status, "
+                            "aborting construct_intrasite_graph!" %
+                            nc_x.nc_dnstr)
+            return
+
+        # Create a NCReplica that matches what the local replica
+        # should say.
We'll use this below in our r_list + l_of_x = NCReplica(dc_local, nc_x.nc_dnstr) + + l_of_x.identify_by_basedn(self.samdb) + + l_of_x.rep_partial = partial + l_of_x.rep_ro = ro + + # Add this replica that "should be present" to the + # needed replica table for this DSA + dc_local.add_needed_replica(l_of_x) + + # Replica list + # + # Let R be a sequence containing each writable replica f of x + # such that f "is present" on a DC s satisfying the following + # criteria: + # + # * s is a writable DC other than the local DC. + # + # * s is in the same site as the local DC. + # + # * If x is a read-only full replica and x is a domain NC, + # then the DC's functional level is at least + # DS_BEHAVIOR_WIN2008. + # + # * Bit NTDSSETTINGS_OPT_IS_TOPL_DETECT_STALE_DISABLED is set + # in the options attribute of the site settings object for + # the local DC's site, or no tuple z exists in the + # kCCFailedLinks or kCCFailedConnections variables such + # that z.UUIDDsa is the objectGUID of the nTDSDSA object + # for s, z.FailureCount > 0, and the current time - + # z.TimeFirstFailure > 2 hours. + + r_list = [] + + # We'll loop thru all the DSAs looking for + # writeable NC replicas that match the naming + # context dn for (nc_x) + # + for dc_s in self.my_site.dsa_table.values(): + # If this partition (nc_x) doesn't appear as a + # replica (f_of_x) on (dc_s) then continue + if nc_x.nc_dnstr not in dc_s.current_rep_table: + continue + + # Pull out the NCReplica (f) of (x) with the dn + # that matches NC (x) we are examining. + f_of_x = dc_s.current_rep_table[nc_x.nc_dnstr] + + # Replica (f) of NC (x) must be writable + if f_of_x.is_ro(): + continue + + # Replica (f) of NC (x) must satisfy the + # "is present" criteria for DC (s) that + # it was found on + if not f_of_x.is_present(): + continue + + # DC (s) must be a writable DSA other than + # my local DC. In other words we'd only replicate + # from other writable DC + if dc_s.is_ro() or dc_s is dc_local: + continue + + # Certain replica graphs are produced only + # for global catalogs, so test against + # method input parameter + if gc_only and not dc_s.is_gc(): + continue + + # DC (s) must be in the same site as the local DC + # as this is the intra-site algorithm. This is + # handled by virtue of placing DSAs in per + # site objects (see enclosing for() loop) + + # If NC (x) is intended to be read-only full replica + # for a domain NC on the target DC then the source + # DC should have functional level at minimum WIN2008 + # + # Effectively we're saying that in order to replicate + # to a targeted RODC (which was introduced in Windows 2008) + # then we have to replicate from a DC that is also minimally + # at that level. + # + # You can also see this requirement in the MS special + # considerations for RODC which state that to deploy + # an RODC, at least one writable domain controller in + # the domain must be running Windows Server 2008 + if ro and not partial and nc_x.nc_type == NCType.domain: + if not dc_s.is_minimum_behavior(dsdb.DS_DOMAIN_FUNCTION_2008): + continue + + # If we haven't been told to turn off stale connection + # detection and this dsa has a stale connection then + # continue + if detect_stale and self.is_stale_link_connection(dc_s): + continue + + # Replica meets criteria. 
Add it to table indexed + # by the GUID of the DC that it appears on + r_list.append(f_of_x) + + # If a partial (not full) replica of NC (x) "should be present" + # on the local DC, append to R each partial replica (p of x) + # such that p "is present" on a DC satisfying the same + # criteria defined above for full replica DCs. + # + # XXX This loop and the previous one differ only in whether + # the replica is partial or not. here we only accept partial + # (because we're partial); before we only accepted full. Order + # doesn't matter (the list is sorted a few lines down) so these + # loops could easily be merged. Or this could be a helper + # function. + + if partial: + # Now we loop thru all the DSAs looking for + # partial NC replicas that match the naming + # context dn for (NC x) + for dc_s in self.my_site.dsa_table.values(): + + # If this partition NC (x) doesn't appear as a + # replica (p) of NC (x) on the dsa DC (s) then + # continue + if nc_x.nc_dnstr not in dc_s.current_rep_table: + continue + + # Pull out the NCReplica with the dn that + # matches NC (x) we are examining. + p_of_x = dc_s.current_rep_table[nc_x.nc_dnstr] + + # Replica (p) of NC (x) must be partial + if not p_of_x.is_partial(): + continue + + # Replica (p) of NC (x) must satisfy the + # "is present" criteria for DC (s) that + # it was found on + if not p_of_x.is_present(): + continue + + # DC (s) must be a writable DSA other than + # my DSA. In other words we'd only replicate + # from other writable DSA + if dc_s.is_ro() or dc_s is dc_local: + continue + + # Certain replica graphs are produced only + # for global catalogs, so test against + # method input parameter + if gc_only and not dc_s.is_gc(): + continue + + # If we haven't been told to turn off stale connection + # detection and this dsa has a stale connection then + # continue + if detect_stale and self.is_stale_link_connection(dc_s): + continue + + # Replica meets criteria. Add it to table indexed + # by the GUID of the DSA that it appears on + r_list.append(p_of_x) + + # Append to R the NC replica that "should be present" + # on the local DC + r_list.append(l_of_x) + + r_list.sort(key=lambda rep: ndr_pack(rep.rep_dsa_guid)) + r_len = len(r_list) + + max_node_edges = self.intrasite_max_node_edges(r_len) + + # Add a node for each r_list element to the replica graph + graph_list = [] + for rep in r_list: + node = GraphNode(rep.rep_dsa_dnstr, max_node_edges) + graph_list.append(node) + + # For each r(i) from (0 <= i < |R|-1) + i = 0 + while i < (r_len - 1): + # Add an edge from r(i) to r(i+1) if r(i) is a full + # replica or r(i+1) is a partial replica + if not r_list[i].is_partial() or r_list[i +1].is_partial(): + graph_list[i + 1].add_edge_from(r_list[i].rep_dsa_dnstr) + + # Add an edge from r(i+1) to r(i) if r(i+1) is a full + # replica or ri is a partial replica. + if not r_list[i + 1].is_partial() or r_list[i].is_partial(): + graph_list[i].add_edge_from(r_list[i + 1].rep_dsa_dnstr) + i = i + 1 + + # Add an edge from r|R|-1 to r0 if r|R|-1 is a full replica + # or r0 is a partial replica. + if not r_list[r_len - 1].is_partial() or r_list[0].is_partial(): + graph_list[0].add_edge_from(r_list[r_len - 1].rep_dsa_dnstr) + + # Add an edge from r0 to r|R|-1 if r0 is a full replica or + # r|R|-1 is a partial replica. 
+ if not r_list[0].is_partial() or r_list[r_len -1].is_partial(): + graph_list[r_len - 1].add_edge_from(r_list[0].rep_dsa_dnstr) + + DEBUG("r_list is length %s" % len(r_list)) + DEBUG('\n'.join(str((x.rep_dsa_guid, x.rep_dsa_dnstr)) + for x in r_list)) + + do_dot_files = self.dot_file_dir is not None and self.debug + if self.verify or do_dot_files: + dot_edges = [] + dot_vertices = set() + for v1 in graph_list: + dot_vertices.add(v1.dsa_dnstr) + for v2 in v1.edge_from: + dot_edges.append((v2, v1.dsa_dnstr)) + dot_vertices.add(v2) + + verify_properties = ('connected',) + verify_and_dot('intrasite_pre_ntdscon', dot_edges, dot_vertices, + label='%s__%s__%s' % (site_local.site_dnstr, + nctype_lut[nc_x.nc_type], + nc_x.nc_dnstr), + properties=verify_properties, debug=DEBUG, + verify=self.verify, + dot_file_dir=self.dot_file_dir, + directed=True) + + rw_dot_vertices = set(x for x in dot_vertices + if not self.get_dsa(x).is_ro()) + rw_dot_edges = [(a, b) for a, b in dot_edges if + a in rw_dot_vertices and b in rw_dot_vertices] + rw_verify_properties = ('connected', + 'directed_double_ring_or_small') + verify_and_dot('intrasite_rw_pre_ntdscon', rw_dot_edges, + rw_dot_vertices, + label='%s__%s__%s' % (site_local.site_dnstr, + nctype_lut[nc_x.nc_type], + nc_x.nc_dnstr), + properties=rw_verify_properties, debug=DEBUG, + verify=self.verify, + dot_file_dir=self.dot_file_dir, + directed=True) + + # For each existing nTDSConnection object implying an edge + # from rj of R to ri such that j != i, an edge from rj to ri + # is not already in the graph, and the total edges directed + # to ri is less than n+2, the KCC adds that edge to the graph. + for vertex in graph_list: + dsa = self.my_site.dsa_table[vertex.dsa_dnstr] + for connect in dsa.connect_table.values(): + remote = connect.from_dnstr + if remote in self.my_site.dsa_table: + vertex.add_edge_from(remote) + + DEBUG('reps are: %s' % ' '.join(x.rep_dsa_dnstr for x in r_list)) + DEBUG('dsas are: %s' % ' '.join(x.dsa_dnstr for x in graph_list)) + + for tnode in graph_list: + # To optimize replication latency in sites with many NC + # replicas, the KCC adds new edges directed to ri to bring + # the total edges to n+2, where the NC replica rk of R + # from which the edge is directed is chosen at random such + # that k != i and an edge from rk to ri is not already in + # the graph. + # + # Note that the KCC tech ref does not give a number for + # the definition of "sites with many NC replicas". At a + # bare minimum to satisfy n+2 edges directed at a node we + # have to have at least three replicas in |R| (i.e. if n + # is zero then at least replicas from two other graph + # nodes may direct edges to us). + if r_len >= 3 and not tnode.has_sufficient_edges(): + candidates = [x for x in graph_list if + (x is not tnode and + x.dsa_dnstr not in tnode.edge_from)] + + debug.DEBUG_BLUE("looking for random link for %s. 
r_len %d, " + "graph len %d candidates %d" + % (tnode.dsa_dnstr, r_len, len(graph_list), + len(candidates))) + + DEBUG("candidates %s" % [x.dsa_dnstr for x in candidates]) + + while candidates and not tnode.has_sufficient_edges(): + other = random.choice(candidates) + DEBUG("trying to add candidate %s" % other.dsa_dnstr) + if not tnode.add_edge_from(other.dsa_dnstr): + debug.DEBUG_RED("could not add %s" % other.dsa_dnstr) + candidates.remove(other) + else: + DEBUG_FN("not adding links to %s: nodes %s, links is %s/%s" % + (tnode.dsa_dnstr, r_len, len(tnode.edge_from), + tnode.max_edges)) + + # Print the graph node in debug mode + DEBUG_FN("%s" % tnode) + + # For each edge directed to the local DC, ensure a nTDSConnection + # points to us that satisfies the KCC criteria + + if tnode.dsa_dnstr == dc_local.dsa_dnstr: + tnode.add_connections_from_edges(dc_local, self.ip_transport) + + if self.verify or do_dot_files: + dot_edges = [] + dot_vertices = set() + for v1 in graph_list: + dot_vertices.add(v1.dsa_dnstr) + for v2 in v1.edge_from: + dot_edges.append((v2, v1.dsa_dnstr)) + dot_vertices.add(v2) + + verify_properties = ('connected',) + verify_and_dot('intrasite_post_ntdscon', dot_edges, dot_vertices, + label='%s__%s__%s' % (site_local.site_dnstr, + nctype_lut[nc_x.nc_type], + nc_x.nc_dnstr), + properties=verify_properties, debug=DEBUG, + verify=self.verify, + dot_file_dir=self.dot_file_dir, + directed=True) + + rw_dot_vertices = set(x for x in dot_vertices + if not self.get_dsa(x).is_ro()) + rw_dot_edges = [(a, b) for a, b in dot_edges if + a in rw_dot_vertices and b in rw_dot_vertices] + rw_verify_properties = ('connected', + 'directed_double_ring_or_small') + verify_and_dot('intrasite_rw_post_ntdscon', rw_dot_edges, + rw_dot_vertices, + label='%s__%s__%s' % (site_local.site_dnstr, + nctype_lut[nc_x.nc_type], + nc_x.nc_dnstr), + properties=rw_verify_properties, debug=DEBUG, + verify=self.verify, + dot_file_dir=self.dot_file_dir, + directed=True) + + def intrasite(self): + """Generate the intrasite KCC connections + + As per MS-ADTS 6.2.2.2. + + If self.readonly is False, the connections are added to self.samdb. + + After this call, all DCs in each site with more than 3 DCs + should be connected in a bidirectional ring. If a site has 2 + DCs, they will bidirectionally connected. Sites with many DCs + may have arbitrary extra connections. + + :return: None + """ + mydsa = self.my_dsa + + DEBUG_FN("intrasite(): enter") + + # Test whether local site has topology disabled + mysite = self.my_site + if mysite.is_intrasite_topology_disabled(): + return + + detect_stale = (not mysite.is_detect_stale_disabled()) + for connect in mydsa.connect_table.values(): + if connect.to_be_added: + debug.DEBUG_CYAN("TO BE ADDED:\n%s" % connect) + + # Loop thru all the partitions, with gc_only False + for partdn, part in self.part_table.items(): + self.construct_intrasite_graph(mysite, mydsa, part, False, + detect_stale) + for connect in mydsa.connect_table.values(): + if connect.to_be_added: + debug.DEBUG_BLUE("TO BE ADDED:\n%s" % connect) + + # If the DC is a GC server, the KCC constructs an additional NC + # replica graph (and creates nTDSConnection objects) for the + # config NC as above, except that only NC replicas that "are present" + # on GC servers are added to R. 
+        for connect in mydsa.connect_table.values():
+            if connect.to_be_added:
+                debug.DEBUG_YELLOW("TO BE ADDED:\n%s" % connect)
+
+        # Do it again, with gc_only True
+        for partdn, part in self.part_table.items():
+            if part.is_config():
+                self.construct_intrasite_graph(mysite, mydsa, part, True,
+                                               detect_stale)
+
+        # The DC repeats the NC replica graph computation and nTDSConnection
+        # creation for each of the NC replica graphs, this time assuming
+        # that no DC has failed. It does so by re-executing the steps as
+        # if the bit NTDSSETTINGS_OPT_IS_TOPL_DETECT_STALE_DISABLED were
+        # set in the options attribute of the site settings object for
+        # the local DC's site. (i.e. we set the "detect_stale" flag to
+        # False)
+        for connect in mydsa.connect_table.values():
+            if connect.to_be_added:
+                debug.DEBUG_BLUE("TO BE ADDED:\n%s" % connect)
+
+        # Loop thru all the partitions.
+        for partdn, part in self.part_table.items():
+            self.construct_intrasite_graph(mysite, mydsa, part, False,
+                                           False)  # don't detect stale
+
+        # If the DC is a GC server, the KCC constructs an additional NC
+        # replica graph (and creates nTDSConnection objects) for the
+        # config NC as above, except that only NC replicas that "are present"
+        # on GC servers are added to R.
+        for connect in mydsa.connect_table.values():
+            if connect.to_be_added:
+                debug.DEBUG_RED("TO BE ADDED:\n%s" % connect)
+
+        for partdn, part in self.part_table.items():
+            if part.is_config():
+                self.construct_intrasite_graph(mysite, mydsa, part, True,
+                                               False)  # don't detect stale
+
+        self._commit_changes(mydsa)
+
+    def list_dsas(self):
+        """Compile a comprehensive list of DSA DNs
+
+        These are all the DSAs on all the sites that KCC would be
+        dealing with.
+
+        This method is not idempotent and may not work correctly in
+        sequence with KCC.run().
+
+        :return: a list of DSA DN strings.
+        """
+        self.load_my_site()
+        self.load_my_dsa()
+
+        self.load_all_sites()
+        self.load_all_partitions()
+        self.load_ip_transport()
+        self.load_all_sitelinks()
+        dsas = []
+        for site in self.site_table.values():
+            dsas.extend([dsa.dsa_dnstr.replace('CN=NTDS Settings,', '', 1)
+                         for dsa in site.dsa_table.values()])
+        return dsas
+
+    def load_samdb(self, dburl, lp, creds, force=False):
+        """Load the database using a URL, loadparm, and credentials
+
+        If force is False, the samdb won't be reloaded if it already
+        exists.
+
+        :param dburl: a database URL.
+        :param lp: a loadparm object.
+        :param creds: a Credentials object.
+        :param force: a boolean indicating whether to overwrite.
+        """
+        if force or self.samdb is None:
+            try:
+                self.samdb = SamDB(url=dburl,
+                                   session_info=system_session(),
+                                   credentials=creds, lp=lp)
+            except ldb.LdbError as e1:
+                (num, msg) = e1.args
+                raise KCCError("Unable to open sam database %s : %s" %
+                               (dburl, msg))
+
+    def plot_all_connections(self, basename, verify_properties=()):
+        """Helper function to plot and verify NTDSConnections
+
+        :param basename: an identifying string to use in filenames and logs.
+ :param verify_properties: properties to verify (default empty) + """ + verify = verify_properties and self.verify + if not verify and self.dot_file_dir is None: + return + + dot_edges = [] + dot_vertices = [] + edge_colours = [] + vertex_colours = [] + + for dsa in self.dsa_by_dnstr.values(): + dot_vertices.append(dsa.dsa_dnstr) + if dsa.is_ro(): + vertex_colours.append('#cc0000') + else: + vertex_colours.append('#0000cc') + for con in dsa.connect_table.values(): + if con.is_rodc_topology(): + edge_colours.append('red') + else: + edge_colours.append('blue') + dot_edges.append((con.from_dnstr, dsa.dsa_dnstr)) + + verify_and_dot(basename, dot_edges, vertices=dot_vertices, + label=self.my_dsa_dnstr, + properties=verify_properties, debug=DEBUG, + verify=verify, dot_file_dir=self.dot_file_dir, + directed=True, edge_colors=edge_colours, + vertex_colors=vertex_colours) + + def run(self, dburl, lp, creds, forced_local_dsa=None, + forget_local_links=False, forget_intersite_links=False, + attempt_live_connections=False): + """Perform a KCC run, possibly updating repsFrom topology + + :param dburl: url of the database to work with. + :param lp: a loadparm object. + :param creds: a Credentials object. + :param forced_local_dsa: pretend to be on the DSA with this dn_str + :param forget_local_links: calculate as if no connections existed + (boolean, default False) + :param forget_intersite_links: calculate with only intrasite connection + (boolean, default False) + :param attempt_live_connections: attempt to connect to remote DSAs to + determine link availability (boolean, default False) + :return: 1 on error, 0 otherwise + """ + if self.samdb is None: + DEBUG_FN("samdb is None; let's load it from %s" % (dburl,)) + self.load_samdb(dburl, lp, creds, force=False) + + if forced_local_dsa: + self.samdb.set_ntds_settings_dn("CN=NTDS Settings,%s" % + forced_local_dsa) + + try: + # Setup + self.load_my_site() + self.load_my_dsa() + + self.load_all_sites() + self.load_all_partitions() + self.load_ip_transport() + self.load_all_sitelinks() + + if self.verify or self.dot_file_dir is not None: + guid_to_dnstr = {} + for site in self.site_table.values(): + guid_to_dnstr.update((str(dsa.dsa_guid), dnstr) + for dnstr, dsa + in site.dsa_table.items()) + + self.plot_all_connections('dsa_initial') + + dot_edges = [] + current_reps, needed_reps = self.my_dsa.get_rep_tables() + for dnstr, c_rep in current_reps.items(): + DEBUG("c_rep %s" % c_rep) + dot_edges.append((self.my_dsa.dsa_dnstr, dnstr)) + + verify_and_dot('dsa_repsFrom_initial', dot_edges, + directed=True, label=self.my_dsa_dnstr, + properties=(), debug=DEBUG, verify=self.verify, + dot_file_dir=self.dot_file_dir) + + dot_edges = [] + for site in self.site_table.values(): + for dsa in site.dsa_table.values(): + current_reps, needed_reps = dsa.get_rep_tables() + for dn_str, rep in current_reps.items(): + for reps_from in rep.rep_repsFrom: + DEBUG("rep %s" % rep) + dsa_guid = str(reps_from.source_dsa_obj_guid) + dsa_dn = guid_to_dnstr[dsa_guid] + dot_edges.append((dsa.dsa_dnstr, dsa_dn)) + + verify_and_dot('dsa_repsFrom_initial_all', dot_edges, + directed=True, label=self.my_dsa_dnstr, + properties=(), debug=DEBUG, verify=self.verify, + dot_file_dir=self.dot_file_dir) + + dot_edges = [] + dot_colours = [] + for link in self.sitelink_table.values(): + from hashlib import md5 + tmp_str = link.dnstr.encode('utf8') + colour = '#' + md5(tmp_str).hexdigest()[:6] + for a, b in itertools.combinations(link.site_list, 2): + dot_edges.append((a[1], b[1])) + 
dot_colours.append(colour)
+                properties = ('connected',)
+                verify_and_dot('dsa_sitelink_initial', dot_edges,
+                               directed=False,
+                               label=self.my_dsa_dnstr, properties=properties,
+                               debug=DEBUG, verify=self.verify,
+                               dot_file_dir=self.dot_file_dir,
+                               edge_colors=dot_colours)
+
+            if forget_local_links:
+                for dsa in self.my_site.dsa_table.values():
+                    dsa.connect_table = dict((k, v) for k, v in
+                                             dsa.connect_table.items()
+                                             if v.is_rodc_topology() or
+                                             (v.from_dnstr not in
+                                              self.my_site.dsa_table))
+                self.plot_all_connections('dsa_forgotten_local')
+
+            if forget_intersite_links:
+                for site in self.site_table.values():
+                    for dsa in site.dsa_table.values():
+                        dsa.connect_table = dict((k, v) for k, v in
+                                                 dsa.connect_table.items()
+                                                 if site is self.my_site and
+                                                 v.is_rodc_topology())
+
+                self.plot_all_connections('dsa_forgotten_all')
+
+            if attempt_live_connections:
+                # Encapsulates lp and creds in a closure that
+                # attempts connections to remote DSAs.
+                def ping(dnsname):
+                    try:
+                        drs_utils.drsuapi_connect(dnsname, lp, creds)
+                    except drs_utils.drsException:
+                        return False
+                    return True
+            else:
+                ping = None
+            # These are the published steps (in order) for the
+            # MS-TECH description of the KCC algorithm ([MS-ADTS] 6.2.2)
+
+            # Step 1
+            self.refresh_failed_links_connections(ping)
+
+            # Step 2
+            self.intrasite()
+
+            # Step 3
+            all_connected = self.intersite(ping)
+
+            # Step 4
+            self.remove_unneeded_ntdsconn(all_connected)
+
+            # Step 5
+            self.translate_ntdsconn()
+
+            # Step 6
+            self.remove_unneeded_failed_links_connections()
+
+            # Step 7
+            self.update_rodc_connection()
+
+            if self.verify or self.dot_file_dir is not None:
+                self.plot_all_connections('dsa_final',
+                                          ('connected',))
+
+                debug.DEBUG_MAGENTA("there are %d dsa guids" %
+                                    len(guid_to_dnstr))
+
+                dot_edges = []
+                edge_colors = []
+                my_dnstr = self.my_dsa.dsa_dnstr
+                current_reps, needed_reps = self.my_dsa.get_rep_tables()
+                for dnstr, n_rep in needed_reps.items():
+                    for reps_from in n_rep.rep_repsFrom:
+                        guid_str = str(reps_from.source_dsa_obj_guid)
+                        dot_edges.append((my_dnstr, guid_to_dnstr[guid_str]))
+                        edge_colors.append('#' + str(n_rep.nc_guid)[:6])
+
+                verify_and_dot('dsa_repsFrom_final', dot_edges, directed=True,
+                               label=self.my_dsa_dnstr,
+                               properties=(), debug=DEBUG, verify=self.verify,
+                               dot_file_dir=self.dot_file_dir,
+                               edge_colors=edge_colors)
+
+                dot_edges = []
+
+                for site in self.site_table.values():
+                    for dsa in site.dsa_table.values():
+                        current_reps, needed_reps = dsa.get_rep_tables()
+                        for n_rep in needed_reps.values():
+                            for reps_from in n_rep.rep_repsFrom:
+                                dsa_guid = str(reps_from.source_dsa_obj_guid)
+                                dsa_dn = guid_to_dnstr[dsa_guid]
+                                dot_edges.append((dsa.dsa_dnstr, dsa_dn))
+
+                verify_and_dot('dsa_repsFrom_final_all', dot_edges,
+                               directed=True, label=self.my_dsa_dnstr,
+                               properties=(), debug=DEBUG, verify=self.verify,
+                               dot_file_dir=self.dot_file_dir)
+
+        except:
+            raise
+
+        return 0
+
+    def import_ldif(self, dburl, lp, ldif_file, forced_local_dsa=None):
+        """Import relevant objects and attributes from an LDIF file.
+
+        The point of this function is to allow a programmer/debugger to
+        import an LDIF file with non-security relevant information that
+        was previously extracted from a DC database. The LDIF file is used
+        to create a temporary abbreviated database. The KCC algorithm can
+        then run against this abbreviated database for debug or test
+        verification that the topology generated is computationally the
+        same between different OSes and algorithms.
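+
+        A typical debugging flow might look like this (a sketch; the
+        paths and the kcc instance are hypothetical):
+
+            kcc.import_ldif('/tmp/kcc-test.ldb', lp, '/tmp/dc-dump.ldif')
+            kcc.run('/tmp/kcc-test.ldb', lp, creds=None)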
+
+        :param dburl: path to the temporary abbreviated db to create
+        :param lp: a loadparm object.
+        :param ldif_file: path to the ldif file to import
+        :param forced_local_dsa: perform KCC from this DSA's point of view
+        :return: zero on success, 1 on error
+        """
+        try:
+            self.samdb = ldif_import_export.ldif_to_samdb(dburl, lp, ldif_file,
+                                                          forced_local_dsa)
+        except ldif_import_export.LdifError as e:
+            logger.critical(e)
+            return 1
+        return 0
+
+    def export_ldif(self, dburl, lp, creds, ldif_file):
+        """Save KCC relevant details to an ldif file
+
+        The point of this function is to allow a programmer/debugger to
+        extract an LDIF file with non-security relevant information from
+        a DC database. The LDIF file can then be imported via the
+        import_ldif() function into a temporary abbreviated database.
+        The KCC algorithm can then run against this abbreviated database
+        for debug or test verification that the topology generated is
+        computationally the same between different OSes and algorithms.
+
+        :param dburl: LDAP database URL to extract info from
+        :param lp: a loadparm object.
+        :param creds: a Credentials object.
+        :param ldif_file: output LDIF file name to create
+        :return: zero on success, 1 on error
+        """
+        try:
+            ldif_import_export.samdb_to_ldif_file(self.samdb, dburl, lp, creds,
+                                                  ldif_file)
+        except ldif_import_export.LdifError as e:
+            logger.critical(e)
+            return 1
+        return 0
diff --git a/python/samba/kcc/debug.py b/python/samba/kcc/debug.py
new file mode 100644
index 0000000..8a69bde
--- /dev/null
+++ b/python/samba/kcc/debug.py
@@ -0,0 +1,61 @@
+# Debug utilities for samba_kcc
+#
+# Copyright (C) Andrew Bartlett 2015
+#
+# Although Andrew Bartlett owns the copyright, the actual work was
+# performed by Douglas Bagnall and Garming Sam.
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program.  If not, see <http://www.gnu.org/licenses/>.
+
+import sys
+import logging
+from functools import partial
+import traceback
+
+logger = logging.getLogger("samba_kcc")
+logger.addHandler(logging.StreamHandler(sys.stdout))
+DEBUG = logger.debug
+WARN = logger.warning
+
+
+# colours for prettier logs
+from samba.colour import C_NORMAL, REV_RED
+from samba.colour import DARK_RED, RED
+from samba.colour import DARK_GREEN, GREEN
+from samba.colour import DARK_YELLOW, YELLOW
+from samba.colour import DARK_BLUE, BLUE
+from samba.colour import PURPLE, MAGENTA
+from samba.colour import DARK_CYAN, CYAN
+from samba.colour import GREY, WHITE
+
+
+def _color_debug(*args, **kwargs):
+    DEBUG('%s%s%s' % (kwargs['color'], args[0], C_NORMAL), *args[1:])
+
+
+_globals = globals()
+for _color in ('DARK_RED', 'RED', 'DARK_GREEN', 'GREEN', 'YELLOW',
+               'DARK_YELLOW', 'DARK_BLUE', 'BLUE', 'PURPLE', 'MAGENTA',
+               'DARK_CYAN', 'CYAN', 'GREY', 'WHITE', 'REV_RED'):
+    _globals['DEBUG_' + _color] = partial(_color_debug, color=_globals[_color])
+
+
+def DEBUG_FN(msg=''):
+    filename, lineno, function, text = traceback.extract_stack(None, 2)[0]
+    DEBUG("%s%s:%s%s %s%s()%s '%s'" % (CYAN, filename, BLUE, lineno,
+                                       CYAN, function, C_NORMAL, msg))
+
+
+def null_debug(*args, **kwargs):
+    pass
diff --git a/python/samba/kcc/graph.py b/python/samba/kcc/graph.py
new file mode 100644
index 0000000..63f1c3a
--- /dev/null
+++ b/python/samba/kcc/graph.py
@@ -0,0 +1,859 @@
+# Graph functions used by KCC intersite
+#
+# Copyright (C) Dave Craft 2011
+# Copyright (C) Andrew Bartlett 2015
+#
+# Andrew Bartlett's alleged work performed by his underlings Douglas
+# Bagnall and Garming Sam.
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program.  If not, see <http://www.gnu.org/licenses/>.
+
+import itertools
+import heapq
+
+from samba.kcc.graph_utils import write_dot_file, verify_and_dot, verify_graph
+from samba.kcc.kcc_utils import KCCError
+from samba.ndr import ndr_pack
+from samba.dcerpc import misc
+
+from samba.kcc.debug import DEBUG, DEBUG_FN, WARN
+
+MAX_DWORD = 2 ** 32 - 1
+
+
+class ReplInfo(object):
+    """Represents information about replication
+
+    NTDSConnections use one representation of a replication schedule,
+    and graph vertices use another. This is the Vertex one.
+    """
+    def __init__(self):
+        self.cost = 0
+        self.interval = 0
+        self.options = 0
+        self.schedule = None
+        self.duration = 84 * 8
+
+    def set_repltimes_from_schedule(self, schedule):
+        """Convert the schedule and calculate duration
+
+        :param schedule: the schedule to convert
+        """
+        self.schedule = convert_schedule_to_repltimes(schedule)
+        self.duration = total_schedule(self.schedule)
+
+
+def total_schedule(schedule):
+    """Return the total number of 15 minute windows in which the schedule
+    is set to replicate in a week. If the schedule is None it is
+    assumed that the replication will happen in every 15 minute
+    window.
+
+    This is essentially a bit population count.
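+
+    For example (a sketch, treating the schedule as a plain sequence
+    of integers rather than a packed structure):
+
+        >>> total_schedule(None)
+        672
+        >>> total_schedule([0x0F] * 84)
+        336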
+ """ + + if schedule is None: + return 84 * 8 # 84 bytes = 84 * 8 bits + + total = 0 + for byte in schedule: + while byte != 0: + total += byte & 1 + byte >>= 1 + return total + + +def convert_schedule_to_repltimes(schedule): + """Convert NTDS Connection schedule to replTime schedule. + + Schedule defined in MS-ADTS 6.1.4.5.2 + ReplTimes defined in MS-DRSR 5.164. + + "Schedule" has 168 bytes but only the lower nibble of each is + significant. There is one byte per hour. Bit 3 (0x08) represents + the first 15 minutes of the hour and bit 0 (0x01) represents the + last 15 minutes. The first byte presumably covers 12am - 1am + Sunday, though the spec doesn't define the start of a week. + + "ReplTimes" has 84 bytes which are the 168 lower nibbles of + "Schedule" packed together. Thus each byte covers 2 hours. Bits 7 + (i.e. 0x80) is the first 15 minutes and bit 0 is the last. The + first byte covers Sunday 12am - 2am (per spec). + + Here we pack two elements of the NTDS Connection schedule slots + into one element of the replTimes list. + + If no schedule appears in NTDS Connection then a default of 0x11 + is set in each replTimes slot as per behaviour noted in a Windows + DC. That default would cause replication within the last 15 + minutes of each hour. + """ + # note, NTDSConnection schedule == None means "once an hour" + # repl_info == None means "always" + if schedule is None or schedule.dataArray[0] is None: + return [0x11] * 84 + + times = [] + data = schedule.dataArray[0].slots + + for i in range(84): + times.append((data[i * 2] & 0xF) << 4 | (data[i * 2 + 1] & 0xF)) + + return times + + +def combine_repl_info(info_a, info_b): + """Generate an repl_info combining two others + + The schedule is set to be the intersection of the two input schedules. + The duration is set to be the duration of the new schedule. + The cost is the sum of the costs (saturating at a huge value). + The options are the intersection of the input options. + The interval is the maximum of the two intervals. 
+ + :param info_a: An input replInfo object + :param info_b: An input replInfo object + :return: a new ReplInfo combining the other 2 + """ + info_c = ReplInfo() + info_c.interval = max(info_a.interval, info_b.interval) + info_c.options = info_a.options & info_b.options + + # schedule of None defaults to "always" + if info_a.schedule is None: + info_a.schedule = [0xFF] * 84 + if info_b.schedule is None: + info_b.schedule = [0xFF] * 84 + + info_c.schedule = [a & b for a, b in zip(info_a.schedule, info_b.schedule)] + info_c.duration = total_schedule(info_c.schedule) + + info_c.cost = min(info_a.cost + info_b.cost, MAX_DWORD) + return info_c + + +def get_spanning_tree_edges(graph, my_site, label=None, verify=False, + dot_file_dir=None): + """Find edges for the intersite graph + + From MS-ADTS 6.2.2.3.4.4 + + :param graph: a kcc.kcc_utils.Graph object + :param my_site: the topology generator's site + :param label: a label for use in dot files and verification + :param verify: if True, try to verify that graph properties are correct + :param dot_file_dir: if not None, write Graphviz dot files here + """ + # Phase 1: Run Dijkstra's to get a list of internal edges, which are + # just the shortest-paths connecting colored vertices + + internal_edges = set() + + for e_set in graph.edge_set: + edgeType = None + for v in graph.vertices: + v.edges = [] + + # All con_type in an edge set is the same + for e in e_set.edges: + edgeType = e.con_type + for v in e.vertices: + v.edges.append(e) + + if verify or dot_file_dir is not None: + graph_edges = [(a.site.site_dnstr, b.site.site_dnstr) + for a, b in + itertools.chain( + *(itertools.combinations(edge.vertices, 2) + for edge in e_set.edges))] + graph_nodes = [v.site.site_dnstr for v in graph.vertices] + + if dot_file_dir is not None: + write_dot_file('edgeset_%s' % (edgeType,), graph_edges, + vertices=graph_nodes, label=label) + + if verify: + errors = verify_graph(graph_edges, vertices=graph_nodes, + properties=('complete', 'connected')) + if errors: + DEBUG('spanning tree edge set %s FAILED' % edgeType) + for p, e, doc in errors: + DEBUG("%18s: %s" % (p, e)) + raise KCCError("spanning tree failed") + + # Run dijkstra's algorithm with just the red vertices as seeds + # Seed from the full replicas + dijkstra(graph, edgeType, False) + + # Process edge set + process_edge_set(graph, e_set, internal_edges) + + # Run dijkstra's algorithm with red and black vertices as the seeds + # Seed from both full and partial replicas + dijkstra(graph, edgeType, True) + + # Process edge set + process_edge_set(graph, e_set, internal_edges) + + # All vertices have root/component as itself + setup_vertices(graph) + process_edge_set(graph, None, internal_edges) + + if verify or dot_file_dir is not None: + graph_edges = [(e.v1.site.site_dnstr, e.v2.site.site_dnstr) + for e in internal_edges] + graph_nodes = [v.site.site_dnstr for v in graph.vertices] + verify_properties = ('multi_edge_forest',) + verify_and_dot('prekruskal', graph_edges, graph_nodes, label=label, + properties=verify_properties, debug=DEBUG, + verify=verify, dot_file_dir=dot_file_dir) + + # Phase 2: Run Kruskal's on the internal edges + output_edges, components = kruskal(graph, internal_edges) + + # This recalculates the cost for the path connecting the + # closest red vertex. 
Ignoring types is fine because NO + # suboptimal edge should exist in the graph + dijkstra(graph, "EDGE_TYPE_ALL", False) # TODO rename + # Phase 3: Process the output + for v in graph.vertices: + if v.is_red(): + v.dist_to_red = 0 + else: + v.dist_to_red = v.repl_info.cost + + if verify or dot_file_dir is not None: + graph_edges = [(e.v1.site.site_dnstr, e.v2.site.site_dnstr) + for e in internal_edges] + graph_nodes = [v.site.site_dnstr for v in graph.vertices] + verify_properties = ('multi_edge_forest',) + verify_and_dot('postkruskal', graph_edges, graph_nodes, + label=label, properties=verify_properties, + debug=DEBUG, verify=verify, + dot_file_dir=dot_file_dir) + + # Ensure only one-way connections for partial-replicas, + # and make sure they point the right way. + edge_list = [] + for edge in output_edges: + # We know these edges only have two endpoints because we made + # them. + v, w = edge.vertices + if v.site is my_site or w.site is my_site: + if (((v.is_black() or w.is_black()) and + v.dist_to_red != MAX_DWORD)): + edge.directed = True + + if w.dist_to_red < v.dist_to_red: + edge.vertices[:] = w, v + edge_list.append(edge) + + if verify or dot_file_dir is not None: + graph_edges = [[x.site.site_dnstr for x in e.vertices] + for e in edge_list] + # add the reverse edge if not directed. + graph_edges.extend([x.site.site_dnstr + for x in reversed(e.vertices)] + for e in edge_list if not e.directed) + graph_nodes = [x.site.site_dnstr for x in graph.vertices] + verify_properties = () + verify_and_dot('post-one-way-partial', graph_edges, graph_nodes, + label=label, properties=verify_properties, + debug=DEBUG, verify=verify, + directed=True, + dot_file_dir=dot_file_dir) + + # count the components + return edge_list, components + + +def create_edge(con_type, site_link, guid_to_vertex): + """Set up a MultiEdge for the intersite graph + + A MultiEdge can have multiple vertices. + + From MS-ADTS 6.2.2.3.4.4 + + :param con_type: a transport type GUID + :param site_link: a kcc.kcc_utils.SiteLink object + :param guid_to_vertex: a mapping between GUIDs and vertices + :return: a MultiEdge + """ + e = MultiEdge() + e.site_link = site_link + e.vertices = [] + for site_guid, site_dn in site_link.site_list: + if str(site_guid) in guid_to_vertex: + e.vertices.extend(guid_to_vertex.get(str(site_guid))) + e.repl_info.cost = site_link.cost + e.repl_info.options = site_link.options + e.repl_info.interval = site_link.interval + e.repl_info.set_repltimes_from_schedule(site_link.schedule) + e.con_type = con_type + e.directed = False + return e + + +def create_auto_edge_set(graph, transport_guid): + """Set up an automatic MultiEdgeSet for the intersite graph + + From within MS-ADTS 6.2.2.3.4.4 + + :param graph: the intersite graph object + :param transport_guid: a transport type GUID + :return: a MultiEdgeSet + """ + e_set = MultiEdgeSet() + # use a NULL guid, not associated with a SiteLinkBridge object + e_set.guid = misc.GUID() + for site_link in graph.edges: + if site_link.con_type == transport_guid: + e_set.edges.append(site_link) + + return e_set + + +def setup_vertices(graph): + """Initialise vertices in the graph for the Dijkstra's run. + + Part of MS-ADTS 6.2.2.3.4.4 + + The schedule and options are set to all-on, so that intersections + with real data defer to that data. + + Refer to the convert_schedule_to_repltimes() docstring for an + explanation of the repltimes schedule values. 
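+
+    Roughly, the state this leaves behind (a summary of the code
+    below, not normative):
+
+        white vertices:  cost = MAX_DWORD; root and component_id = None
+        other vertices:  cost = 0; root and component_id = the vertex
+        all vertices:    options all-on, schedule None ("always"),
+                         demoted False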
+
+    :param graph: an IntersiteGraph object
+    :return: None
+    """
+    for v in graph.vertices:
+        if v.is_white():
+            v.repl_info.cost = MAX_DWORD
+            v.root = None
+            v.component_id = None
+        else:
+            v.repl_info.cost = 0
+            v.root = v
+            v.component_id = v
+
+        v.repl_info.interval = 0
+        v.repl_info.options = 0xFFFFFFFF
+        # repl_info.schedule == None means "always".
+        v.repl_info.schedule = None
+        v.repl_info.duration = 84 * 8
+        v.demoted = False
+
+
+def dijkstra(graph, edge_type, include_black):
+    """Perform Dijkstra's algorithm on an intersite graph.
+
+    :param graph: an IntersiteGraph object
+    :param edge_type: a transport type GUID
+    :param include_black: boolean, whether to include black vertices
+    :return: None
+    """
+    queue = setup_dijkstra(graph, edge_type, include_black)
+    while len(queue) > 0:
+        cost, guid, vertex = heapq.heappop(queue)
+        for edge in vertex.edges:
+            for v in edge.vertices:
+                if v is not vertex:
+                    # add new path from vertex to v
+                    try_new_path(graph, queue, vertex, edge, v)
+
+
+def setup_dijkstra(graph, edge_type, include_black):
+    """Create a vertex queue for Dijkstra's algorithm.
+
+    :param graph: an IntersiteGraph object
+    :param edge_type: a transport type GUID
+    :param include_black: boolean, whether to include black vertices
+    :return: A heap queue of vertices
+    """
+    queue = []
+    setup_vertices(graph)
+    for vertex in graph.vertices:
+        if vertex.is_white():
+            continue
+
+        if (((vertex.is_black() and not include_black)
+             or edge_type not in vertex.accept_black
+             or edge_type not in vertex.accept_red_red)):
+            vertex.repl_info.cost = MAX_DWORD
+            vertex.root = None  # NULL GUID
+            vertex.demoted = True  # Demoted appears not to be used
+        else:
+            heapq.heappush(queue, (vertex.repl_info.cost, vertex.guid, vertex))
+
+    return queue
+
+
+def try_new_path(graph, queue, vfrom, edge, vto):
+    """Helper function for Dijkstra's algorithm.
+
+    :param graph: an IntersiteGraph object
+    :param queue: the Dijkstra priority queue to push new paths onto
+    :param vfrom: Vertex we are coming from
+    :param edge: an edge to try
+    :param vto: the other Vertex
+    :return: None
+    """
+    new_repl_info = combine_repl_info(vfrom.repl_info, edge.repl_info)
+
+    # Cheaper or longer schedule goes in the heap
+    if (new_repl_info.cost < vto.repl_info.cost or
+            new_repl_info.duration > vto.repl_info.duration):
+        vto.root = vfrom.root
+        vto.component_id = vfrom.component_id
+        vto.repl_info = new_repl_info
+        heapq.heappush(queue, (vto.repl_info.cost, vto.guid, vto))
+
+
+def check_demote_vertex(vertex, edge_type):
+    """Demote non-white vertices that accept only white edges
+
+    This makes them seem temporarily like white vertices.
+
+    :param vertex: a Vertex()
+    :param edge_type: a transport type GUID
+    :return: None
+    """
+    if vertex.is_white():
+        return
+
+    # Accepts neither red-red nor black edges, demote
+    if ((edge_type not in vertex.accept_black and
+         edge_type not in vertex.accept_red_red)):
+        vertex.repl_info.cost = MAX_DWORD
+        vertex.root = None
+        vertex.demoted = True  # Demoted appears not to be used
+
+
+def undemote_vertex(vertex):
+    """Un-demote non-white vertices
+
+    Set a vertex to an undemoted state.
+ + :param vertex: a Vertex() + :return: None + """ + if vertex.is_white(): + return + + vertex.repl_info.cost = 0 + vertex.root = vertex + vertex.demoted = False + + +def process_edge_set(graph, e_set, internal_edges): + """Find internal edges to pass to Kruskal's algorithm + + :param graph: an IntersiteGraph object + :param e_set: an edge set + :param internal_edges: a set that internal edges get added to + :return: None + """ + if e_set is None: + for edge in graph.edges: + for vertex in edge.vertices: + check_demote_vertex(vertex, edge.con_type) + process_edge(graph, edge, internal_edges) + for vertex in edge.vertices: + undemote_vertex(vertex) + else: + for edge in e_set.edges: + process_edge(graph, edge, internal_edges) + + +def process_edge(graph, examine, internal_edges): + """Find the set of all vertices touching an edge to examine + + :param graph: an IntersiteGraph object + :param examine: an edge + :param internal_edges: a set that internal edges get added to + :return: None + """ + vertices = [] + for v in examine.vertices: + # Append a 4-tuple of color, repl cost, guid and vertex + vertices.append((v.color, v.repl_info.cost, v.ndrpacked_guid, v)) + # Sort by color, lower + DEBUG("vertices is %s" % vertices) + vertices.sort() + + color, cost, guid, bestv = vertices[0] + # Add to internal edges an edge from every colored vertex to bestV + for v in examine.vertices: + if v.component_id is None or v.root is None: + continue + + # Only add edge if valid inter-tree edge - needs a root and + # different components + if ((bestv.component_id is not None and + bestv.root is not None and + v.component_id is not None and + v.root is not None and + bestv.component_id != v.component_id)): + add_int_edge(graph, internal_edges, examine, bestv, v) + + +def add_int_edge(graph, internal_edges, examine, v1, v2): + """Add edges between compatible red and black vertices + + Internal edges form the core of the tree -- white and RODC + vertices attach to it as leaf nodes. An edge needs to have black + or red endpoints with compatible replication schedules to be + accepted as an internal edge. + + Here we examine an edge and add it to the set of internal edges if + it looks good. + + :param graph: the graph object. + :param internal_edges: a set of internal edges + :param examine: an edge to examine for suitability. + :param v1: a Vertex + :param v2: the other Vertex + """ + root1 = v1.root + root2 = v2.root + + red_red = root1.is_red() and root2.is_red() + + if red_red: + if (examine.con_type not in root1.accept_red_red + or examine.con_type not in root2.accept_red_red): + return + elif (examine.con_type not in root1.accept_black + or examine.con_type not in root2.accept_black): + return + + # Create the transitive replInfo for the two trees and this edge + ri = combine_repl_info(v1.repl_info, v2.repl_info) + if ri.duration == 0: + return + + ri2 = combine_repl_info(ri, examine.repl_info) + if ri2.duration == 0: + return + + # Order by vertex guid + if root1.ndrpacked_guid > root2.ndrpacked_guid: + root1, root2 = root2, root1 + + newIntEdge = InternalEdge(root1, root2, red_red, ri2, examine.con_type, + examine.site_link) + + internal_edges.add(newIntEdge) + + +def kruskal(graph, edges): + """Perform Kruskal's algorithm using the given set of edges + + The input edges are "internal edges" -- between red and black + nodes. The output edges are a minimal spanning tree. + + :param graph: the graph object. 
+ :param edges: a set of edges + :return: a tuple of a list of edges, and the number of components + """ + for v in graph.vertices: + v.edges = [] + + components = set([x for x in graph.vertices if not x.is_white()]) + edges = list(edges) + + # Sorted based on internal comparison function of internal edge + edges.sort() + + # XXX expected_num_tree_edges is never used + expected_num_tree_edges = 0 # TODO this value makes little sense + + count_edges = 0 + output_edges = [] + index = 0 + while index < len(edges): # TODO and num_components > 1 + e = edges[index] + parent1 = find_component(e.v1) + parent2 = find_component(e.v2) + if parent1 is not parent2: + count_edges += 1 + add_out_edge(graph, output_edges, e) + parent1.component_id = parent2 + components.discard(parent1) + + index += 1 + + return output_edges, len(components) + + +def find_component(vertex): + """Kruskal helper to find the component a vertex belongs to. + + :param vertex: a Vertex + :return: the Vertex object representing the component + """ + if vertex.component_id is vertex: + return vertex + + current = vertex + while current.component_id is not current: + current = current.component_id + + root = current + current = vertex + while current.component_id is not root: + n = current.component_id + current.component_id = root + current = n + + return root + + +def add_out_edge(graph, output_edges, e): + """Kruskal helper to add output edges + + :param graph: the InterSiteGraph + :param output_edges: the list of spanning tree edges + :param e: the edge to be added + :return: None + """ + v1 = e.v1 + v2 = e.v2 + + # This multi-edge is a 'real' undirected 2-vertex edge with no + # GUID. XXX It is not really the same thing at all as the + # multi-vertex edges relating to site-links. We shouldn't really + # be using the same class or storing them in the same list as the + # other ones. But we do. Historical reasons. + ee = MultiEdge() + ee.directed = False + ee.site_link = e.site_link + ee.vertices.append(v1) + ee.vertices.append(v2) + ee.con_type = e.e_type + ee.repl_info = e.repl_info + output_edges.append(ee) + + v1.edges.append(ee) + v2.edges.append(ee) + + +def setup_graph(part, site_table, transport_guid, sitelink_table, + bridges_required): + """Set up an IntersiteGraph based on intersite topology + + The graph will have a Vertex for each site, a MultiEdge for each + siteLink object, and a MultiEdgeSet for each siteLinkBridge object + (or implied siteLinkBridge). + + :param part: the partition we are dealing with + :param site_table: a mapping of guids to sites (KCC.site_table) + :param transport_guid: the GUID of the IP transport + :param sitelink_table: a mapping of dnstrs to sitelinks + :param bridges_required: boolean, asking in vain for something to do + with site link bridges + :return: a new IntersiteGraph + """ + guid_to_vertex = {} + # Create graph + g = IntersiteGraph() + # Add vertices + for site_guid, site in site_table.items(): + vertex = Vertex(site, part) + vertex.guid = site_guid + vertex.ndrpacked_guid = ndr_pack(site.site_guid) + g.vertices.add(vertex) + guid_vertices = guid_to_vertex.setdefault(site_guid, []) + guid_vertices.append(vertex) + + connected_vertices = set() + + for site_link_dn, site_link in sitelink_table.items(): + new_edge = create_edge(transport_guid, site_link, + guid_to_vertex) + connected_vertices.update(new_edge.vertices) + g.edges.add(new_edge) + + # XXX we are ignoring the bridges_required option and indeed the + # whole concept of SiteLinkBridge objects. 
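+    # (An illustrative sketch only: honouring siteLinkBridge objects
+    #  would mean building one MultiEdgeSet per bridge, along the
+    #  lines of
+    #      for bridge in bridge_table.values():   # hypothetical table
+    #          g.edge_set.add(make_bridge_edge_set(g, bridge))
+    #  where make_bridge_edge_set() is a hypothetical helper, rather
+    #  than the single automatic set added below.)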
+ if bridges_required: + WARN("Samba KCC ignores the bridges required option") + + g.edge_set.add(create_auto_edge_set(g, transport_guid)) + g.connected_vertices = connected_vertices + + return g + + +class VertexColor(object): + """Enumeration of vertex colours""" + (red, black, white, unknown) = range(0, 4) + + +class Vertex(object): + """intersite graph representation of a Site. + + There is a separate vertex for each partition. + + :param site: the site to make a vertex of. + :param part: the partition. + """ + def __init__(self, site, part): + self.site = site + self.part = part + self.color = VertexColor.unknown + self.edges = [] + self.accept_red_red = [] + self.accept_black = [] + self.repl_info = ReplInfo() + self.root = self + self.guid = None + self.component_id = self + self.demoted = False + self.options = 0 + self.interval = 0 + + def color_vertex(self): + """Color to indicate which kind of NC replica the vertex contains + """ + # IF s contains one or more DCs with full replicas of the + # NC cr!nCName + # SET v.Color to COLOR.RED + # ELSEIF s contains one or more partial replicas of the NC + # SET v.Color to COLOR.BLACK + # ELSE + # SET v.Color to COLOR.WHITE + + # set to minimum (no replica) + self.color = VertexColor.white + + for dnstr, dsa in self.site.dsa_table.items(): + rep = dsa.get_current_replica(self.part.nc_dnstr) + if rep is None: + continue + + # We have a full replica which is the largest + # value so exit + if not rep.is_partial(): + self.color = VertexColor.red + break + else: + self.color = VertexColor.black + + def is_red(self): + assert(self.color != VertexColor.unknown) + return (self.color == VertexColor.red) + + def is_black(self): + assert(self.color != VertexColor.unknown) + return (self.color == VertexColor.black) + + def is_white(self): + assert(self.color != VertexColor.unknown) + return (self.color == VertexColor.white) + + +class IntersiteGraph(object): + """Graph for representing the intersite""" + def __init__(self): + self.vertices = set() + self.edges = set() + self.edge_set = set() + # All vertices that are endpoints of edges + self.connected_vertices = None + + +class MultiEdgeSet(object): + """Defines a multi edge set""" + def __init__(self): + self.guid = 0 # objectGuid siteLinkBridge + self.edges = [] + + +class MultiEdge(object): + """An "edge" between multiple vertices""" + def __init__(self): + self.site_link = None # object siteLink + self.vertices = [] + self.con_type = None # interSiteTransport GUID + self.repl_info = ReplInfo() + self.directed = True + + +class InternalEdge(object): + """An edge that forms part of the minimal spanning tree + + These are used in the Kruskal's algorithm. Their interesting + feature isa that they are sortable, with the good edges sorting + before the bad ones -- lower is better. + """ + def __init__(self, v1, v2, redred, repl, eType, site_link): + self.v1 = v1 + self.v2 = v2 + self.red_red = redred + self.repl_info = repl + self.e_type = eType + self.site_link = site_link + + def __hash__(self): + return hash(( + self.v1, self.v2, self.red_red, self.repl_info, self.e_type, + self.site_link)) + + def __eq__(self, other): + return not self < other and not other < self + + def __ne__(self, other): + return self < other or other < self + + def __gt__(self, other): + return other < self + + def __ge__(self, other): + return not self < other + + def __le__(self, other): + return not other < self + + def __lt__(self, other): + """Here "less than" means "better". 
+
+    From within MS-ADTS 6.2.2.3.4.4:
+
+        SORT internalEdges by (descending RedRed,
+                               ascending ReplInfo.Cost,
+                               descending available time in ReplInfo.Schedule,
+                               ascending V1ID,
+                               ascending V2ID,
+                               ascending Type)
+    """
+    if self.red_red != other.red_red:
+        return self.red_red
+
+    if self.repl_info.cost != other.repl_info.cost:
+        return self.repl_info.cost < other.repl_info.cost
+
+    if self.repl_info.duration != other.repl_info.duration:
+        return self.repl_info.duration > other.repl_info.duration
+
+    if self.v1.guid != other.v1.guid:
+        return self.v1.ndrpacked_guid < other.v1.ndrpacked_guid
+
+    if self.v2.guid != other.v2.guid:
+        return self.v2.ndrpacked_guid < other.v2.ndrpacked_guid
+
+    return self.e_type < other.e_type
diff --git a/python/samba/kcc/graph_utils.py b/python/samba/kcc/graph_utils.py
new file mode 100644
index 0000000..c89d06a
--- /dev/null
+++ b/python/samba/kcc/graph_utils.py
@@ -0,0 +1,343 @@
+# Graph topology utilities, used by KCC
+#
+# Copyright (C) Andrew Bartlett 2015
+#
+# Copyright goes to Andrew Bartlett, but the actual work was performed
+# by Douglas Bagnall and Garming Sam.
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program.  If not, see <http://www.gnu.org/licenses/>.
+
+import os
+import itertools
+
+from samba.graph import dot_graph
+
+
+def write_dot_file(basename, edge_list, vertices=None, label=None,
+                   dot_file_dir=None, debug=None, **kwargs):
+    s = dot_graph(vertices, edge_list, title=label, **kwargs)
+    if label:
+        # sanitise DN and guid labels
+        basename += '_' + label.replace(', ', '')
+
+    filename = os.path.join(dot_file_dir, "%s.dot" % basename)
+    if debug is not None:
+        debug("writing graph to %s" % filename)
+    with open(filename, 'w') as f:
+        f.write(s)
+
+
+class GraphError(Exception):
+    pass
+
+
+def verify_graph_complete(edges, vertices, edge_vertices):
+    """The graph is complete, which is to say there is an edge between
+    every pair of nodes."""
+    for v in vertices:
+        remotes = set()
+        for a, b in edges:
+            if a == v:
+                remotes.add(b)
+            elif b == v:
+                remotes.add(a)
+        if len(remotes) + 1 != len(vertices):
+            raise GraphError("graph is not fully connected")
+
+
+def verify_graph_connected(edges, vertices, edge_vertices):
+    """There is a path between any two nodes."""
+    if not edges:
+        if len(vertices) <= 1:
+            return
+        raise GraphError("all vertices are disconnected because "
+                         "there are no edges")
+
+    remaining_edges = list(edges)
+    reached = set(remaining_edges.pop())
+    while True:
+        doomed = []
+        for i, e in enumerate(remaining_edges):
+            a, b = e
+            if a in reached:
+                reached.add(b)
+                doomed.append(i)
+            elif b in reached:
+                reached.add(a)
+                doomed.append(i)
+        if not doomed:
+            break
+        for i in reversed(doomed):
+            del remaining_edges[i]
+
+    if remaining_edges or reached != set(vertices):
+        s = ("the graph is not connected, "
+             "as the following vertices are unreachable:\n ")
+        s += '\n '.join(v for v in sorted(vertices)
+                        if v not in reached)
+        raise GraphError(s)
+
+
+def verify_graph_connected_under_edge_failures(edges,
vertices, edge_vertices): + """The graph stays connected when any single edge is removed.""" + if len(edges) == 0: + return verify_graph_connected(edges, vertices, edge_vertices) + + for subset in itertools.combinations(edges, len(edges) - 1): + try: + verify_graph_connected(subset, vertices, edge_vertices) + except GraphError as e: + for edge in edges: + if edge not in subset: + raise GraphError("The graph will be disconnected when the " + "connection from %s to %s fails" % edge) + + +def verify_graph_connected_under_vertex_failures(edges, vertices, + edge_vertices): + """The graph stays connected when any single vertex is removed.""" + for v in vertices: + sub_vertices = [x for x in vertices if x is not v] + sub_edges = [x for x in edges if v not in x] + verify_graph_connected(sub_edges, sub_vertices, sub_vertices) + + +def verify_graph_forest(edges, vertices, edge_vertices): + """The graph contains no loops.""" + trees = [set(e) for e in edges] + while True: + for a, b in itertools.combinations(trees, 2): + intersection = a & b + if intersection: + if len(intersection) == 1: + a |= b + trees.remove(b) + break + else: + raise GraphError("there is a loop in the graph\n" + " vertices %s\n edges %s\n" + " intersection %s" % + (vertices, edges, intersection)) + else: + # no break in itertools.combinations loop means no + # further mergers, so we're done. + # + # XXX here we also know whether it is a tree or a + # forest by len(trees) but the connected test already + # tells us that. + return + + +def verify_graph_multi_edge_forest(edges, vertices, edge_vertices): + """This allows a forest with duplicate edges. That is if multiple + edges go between the same two vertices, they are treated as a + single edge by this test. + + e.g.: + o + pass: o-o=o o=o (|) fail: o-o + `o o `o' + """ + unique_edges = set(edges) + trees = [set(e) for e in unique_edges] + while True: + for a, b in itertools.combinations(trees, 2): + intersection = a & b + if intersection: + if len(intersection) == 1: + a |= b + trees.remove(b) + break + else: + raise GraphError("there is a loop in the graph") + else: + return + + +def verify_graph_no_lonely_vertices(edges, vertices, edge_vertices): + """There are no vertices without edges.""" + lonely = set(vertices) - set(edge_vertices) + if lonely: + raise GraphError("some vertices are not connected:\n%s" % + '\n'.join(sorted(lonely))) + + +def verify_graph_no_unknown_vertices(edges, vertices, edge_vertices): + """The edge endpoints contain no vertices that are otherwise unknown.""" + unknown = set(edge_vertices) - set(vertices) + if unknown: + raise GraphError("some edge vertices are seemingly unknown:\n%s" % + '\n'.join(sorted(unknown))) + + +def verify_graph_directed_double_ring(edges, vertices, edge_vertices): + """Each node has at least two directed edges leaving it, and two + arriving. The edges work in pairs that have the same end points + but point in opposite directions. The pairs form a path that + touches every vertex and form a loop. + + There might be other connections that *aren't* part of the ring. + + Deciding this for sure is NP-complete (the Hamiltonian path + problem), but there are some easy failures that can be detected. + So far we check for: + - leaf nodes + - disjoint subgraphs + - robustness against edge and vertex failure + """ + # a zero or one node graph is OK with no edges. + # The two vertex case is special. Use + # verify_graph_directed_double_ring_or_small() to allow that. 
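+    # An illustrative minimal pass case (hypothetical site names): the
+    # three-vertex ring
+    #     [('a', 'b'), ('b', 'a'), ('b', 'c'), ('c', 'b'),
+    #      ('c', 'a'), ('a', 'c')]
+    # links every adjacent pair in both directions, so it survives the
+    # leaf, split-graph and failure checks below.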
+ if not edges and len(vertices) <= 1: + return + if len(edges) < 2 * len(vertices): + raise GraphError("directed double ring requires at least twice " + "as many edges as vertices") + + # Reduce the problem space by looking only at bi-directional links. + half_duplex = set(edges) + duplex_links = set() + for edge in edges: + rev_edge = (edge[1], edge[0]) + if edge in half_duplex and rev_edge in half_duplex: + duplex_links.add(edge) + half_duplex.remove(edge) + half_duplex.remove(rev_edge) + + # the Hamiltonian cycle problem is NP-complete in general, but we + # can cheat a bit and prove a less strong result. + # + # We declutter the graph by replacing nodes with edges connecting + # their neighbours. + # + # A-B-C --> A-C + # + # -A-B-C- --> -A--C- + # `D_ `D'_ + # + # In the end there should be a single 2 vertex graph. + + edge_map = {} + for a, b in duplex_links: + edge_map.setdefault(a, set()).add(b) + edge_map.setdefault(b, set()).add(a) + + # an easy to detect failure is a lonely leaf node + for vertex, neighbours in edge_map.items(): + if len(neighbours) == 1: + raise GraphError("wanted double directed ring, found a leaf node" + "(%s)" % vertex) + + for vertex in list(edge_map.keys()): + nset = edge_map[vertex] + if not nset: + continue + for n in nset: + n_neighbours = edge_map[n] + n_neighbours.remove(vertex) + n_neighbours.update(x for x in nset if x != n) + del edge_map[vertex] + + if len(edge_map) > 1: + raise GraphError("wanted double directed ring, but " + "this looks like a split graph\n" + "(%s can't reach each other)" % + ', '.join(edge_map.keys())) + + verify_graph_connected_under_edge_failures(duplex_links, vertices, + edge_vertices) + verify_graph_connected_under_vertex_failures(duplex_links, vertices, + edge_vertices) + + +def verify_graph_directed_double_ring_or_small(edges, vertices, edge_vertices): + """This performs the directed_double_ring test but makes special + concessions for small rings where the strict rules don't really + apply.""" + if len(vertices) < 2: + return + if len(vertices) == 2: + """With 2 nodes there should be a single link in each directions.""" + if (len(edges) == 2 and + edges[0][0] == edges[1][1] and + edges[0][1] == edges[1][0]): + return + raise GraphError("A two vertex graph should have an edge each way.") + + return verify_graph_directed_double_ring(edges, vertices, edge_vertices) + + +def verify_graph(edges, vertices=None, directed=False, properties=()): + errors = [] + properties = [x.replace(' ', '_') for x in properties] + + edge_vertices = set() + for a, b in edges: + edge_vertices.add(a) + edge_vertices.add(b) + + if vertices is None: + vertices = edge_vertices + else: + vertices = set(vertices) + + for p in properties: + fn = 'verify_graph_%s' % p + f = globals()[fn] + try: + f(edges, vertices, edge_vertices) + except GraphError as e: + errors.append((p, e, f.__doc__)) + + return errors + + +def verify_and_dot(basename, edges, vertices=None, label=None, + reformat_labels=True, directed=False, + properties=(), fatal=True, debug=None, + verify=True, dot_file_dir=None, + edge_colors=None, edge_labels=None, + vertex_colors=None): + + if dot_file_dir is not None: + write_dot_file(basename, edges, vertices=vertices, label=label, + dot_file_dir=dot_file_dir, + reformat_labels=reformat_labels, directed=directed, + debug=debug, edge_colors=edge_colors, + edge_labels=edge_labels, vertex_colors=vertex_colors) + + if verify: + errors = verify_graph(edges, vertices, + properties=properties) + if errors: + title = '%s %s' % (basename, label or 
'')
+            debug("%s FAILED:" % title)
+            for p, e, doc in errors:
+                debug(" %18s: %s" % (p, e))
+            if fatal:
+                raise GraphError("The '%s' graph lacks the following "
+                                 "properties:\n%s" %
+                                 (title, '\n'.join('%s: %s' % (p, e)
+                                                   for p, e, doc in errors)))
+
+
+def list_verify_tests():
+    for k, v in sorted(globals().items()):
+        if k.startswith('verify_graph_'):
+            print(k.replace('verify_graph_', ''))
+            if v.__doc__:
+                print('    %s' % (v.__doc__.rstrip()))
+            else:
+                print()
diff --git a/python/samba/kcc/kcc_utils.py b/python/samba/kcc/kcc_utils.py
new file mode 100644
index 0000000..326889d
--- /dev/null
+++ b/python/samba/kcc/kcc_utils.py
@@ -0,0 +1,2364 @@
+# KCC topology utilities
+#
+# Copyright (C) Dave Craft 2011
+# Copyright (C) Jelmer Vernooij 2011
+# Copyright (C) Andrew Bartlett 2015
+#
+# Andrew Bartlett's alleged work performed by his underlings Douglas
+# Bagnall and Garming Sam.
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program.  If not, see <http://www.gnu.org/licenses/>.
+import sys
+import ldb
+import uuid
+
+from samba import dsdb
+from samba.dcerpc import (
+    drsblobs,
+    drsuapi,
+    misc,
+)
+from samba.samdb import dsdb_Dn
+from samba.ndr import ndr_unpack, ndr_pack
+from collections import Counter
+
+
+class KCCError(Exception):
+    pass
+
+
+class NCType(object):
+    (unknown, schema, domain, config, application) = range(0, 5)
+
+
+# map the NCType enum to strings for debugging
+nctype_lut = dict((v, k) for k, v in NCType.__dict__.items() if k[:2] != '__')
+
+
+class NamingContext(object):
+    """Base class for a naming context.
+
+    Holds the DN, GUID, SID (if available) and type of the DN.
+
+    Subclasses may inherit from this and specialize
+    """
+
+    def __init__(self, nc_dnstr):
+        """Instantiate a NamingContext
+
+        :param nc_dnstr: NC dn string
+        """
+        self.nc_dnstr = nc_dnstr
+        self.nc_guid = None
+        self.nc_sid = None
+        self.nc_type = NCType.unknown
+
+    def __str__(self):
+        """Debug dump string output of class"""
+        text = "%s:" % (self.__class__.__name__,) +\
+               "\n\tnc_dnstr=%s" % self.nc_dnstr +\
+               "\n\tnc_guid=%s" % str(self.nc_guid)
+
+        if self.nc_sid is None:
+            text = text + "\n\tnc_sid=<absent>"
+        else:
+            text = text + "\n\tnc_sid=<present>"
+
+        text = text + "\n\tnc_type=%s (%s)" % (nctype_lut[self.nc_type],
+                                               self.nc_type)
+        return text
+
+    def load_nc(self, samdb):
+        attrs = ["objectGUID",
+                 "objectSid"]
+        try:
+            res = samdb.search(base=self.nc_dnstr,
+                               scope=ldb.SCOPE_BASE, attrs=attrs)
+
+        except ldb.LdbError as e:
+            (enum, estr) = e.args
+            raise KCCError("Unable to find naming context (%s) - (%s)" %
+                           (self.nc_dnstr, estr))
+        msg = res[0]
+        if "objectGUID" in msg:
+            self.nc_guid = misc.GUID(samdb.schema_format_value("objectGUID",
+                                     msg["objectGUID"][0]))
+        if "objectSid" in msg:
+            self.nc_sid = msg["objectSid"][0]
+
+        assert self.nc_guid is not None
+
+    def is_config(self):
+        """Return True if NC is config"""
+        assert self.nc_type != NCType.unknown
+        return self.nc_type == NCType.config
+
+    def identify_by_basedn(self, samdb):
+        """Given an NC object, identify what type it is thru
+        the samdb basedn strings and NC sid value
+        """
+        # Invoke loader to initialize guid and more
+        # importantly sid value (sid is used to identify
+        # domain NCs)
+        if self.nc_guid is None:
+            self.load_nc(samdb)
+
+        # We check against schema and config because they
+        # will be the same for all nTDSDSAs in the forest.
+        # That leaves the domain NCs which can be identified
+        # by sid and application NCs as the last identified
+        if self.nc_dnstr == str(samdb.get_schema_basedn()):
+            self.nc_type = NCType.schema
+        elif self.nc_dnstr == str(samdb.get_config_basedn()):
+            self.nc_type = NCType.config
+        elif self.nc_sid is not None:
+            self.nc_type = NCType.domain
+        else:
+            self.nc_type = NCType.application
+
+    def identify_by_dsa_attr(self, samdb, attr):
+        """Given an NC which has been discovered thru the
+        nTDSDSA database object, determine what type of NC
+        it is (i.e. schema, config, domain, application) via
+        the use of the schema attribute under which the NC
+        was found.
+
+        :param attr: attr of nTDSDSA object where NC DN appears
+        """
+        # If the NC is listed under msDS-HasDomainNCs then
+        # this can only be a domain NC and it is our default
+        # domain for this dsa
+        if attr == "msDS-HasDomainNCs":
+            self.nc_type = NCType.domain
+
+        # If the NC is listed under hasPartialReplicaNCs
+        # this is only a domain NC
+        elif attr == "hasPartialReplicaNCs":
+            self.nc_type = NCType.domain
+
+        # NCs listed under hasMasterNCs are either
+        # default domain, schema, or config.  We
+        # utilize the identify_by_basedn() to
+        # identify those
+        elif attr == "hasMasterNCs":
+            self.identify_by_basedn(samdb)
+
+        # Still unknown (unlikely) but for completeness
+        # and for finally identifying application NCs
+        if self.nc_type == NCType.unknown:
+            self.identify_by_basedn(samdb)
+
+
+class NCReplica(NamingContext):
+    """Naming context replica that is relative to a specific DSA.
+
+    This is a more specific form of NamingContext class (inheriting from that
+    class) and it identifies unique attributes of the DSA's replica for a NC.
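+
+    An illustrative sketch of how a replica is typically built up (the
+    DN here is hypothetical):
+
+        rep = NCReplica(dsa, "DC=example,DC=com")
+        rep.identify_by_dsa_attr(samdb, "msDS-HasDomainNCs")
+        # rep.is_default() is now True for this DSA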
+ """ + + def __init__(self, dsa, nc_dnstr): + """Instantiate a Naming Context Replica + + :param dsa_guid: GUID of DSA where replica appears + :param nc_dnstr: NC dn string + """ + self.rep_dsa_dnstr = dsa.dsa_dnstr + self.rep_dsa_guid = dsa.dsa_guid + self.rep_default = False # replica for DSA's default domain + self.rep_partial = False + self.rep_ro = False + self.rep_instantiated_flags = 0 + + self.rep_fsmo_role_owner = None + + # RepsFromTo tuples + self.rep_repsFrom = [] + + # RepsFromTo tuples + self.rep_repsTo = [] + + # The (is present) test is a combination of being + # enumerated in (hasMasterNCs or msDS-hasFullReplicaNCs or + # hasPartialReplicaNCs) as well as its replica flags found + # thru the msDS-HasInstantiatedNCs. If the NC replica meets + # the first enumeration test then this flag is set true + self.rep_present_criteria_one = False + + # Call my super class we inherited from + NamingContext.__init__(self, nc_dnstr) + + def __str__(self): + """Debug dump string output of class""" + text = "%s:" % self.__class__.__name__ +\ + "\n\tdsa_dnstr=%s" % self.rep_dsa_dnstr +\ + "\n\tdsa_guid=%s" % self.rep_dsa_guid +\ + "\n\tdefault=%s" % self.rep_default +\ + "\n\tro=%s" % self.rep_ro +\ + "\n\tpartial=%s" % self.rep_partial +\ + "\n\tpresent=%s" % self.is_present() +\ + "\n\tfsmo_role_owner=%s" % self.rep_fsmo_role_owner +\ + "".join("\n%s" % rep for rep in self.rep_repsFrom) +\ + "".join("\n%s" % rep for rep in self.rep_repsTo) + + return "%s\n%s" % (NamingContext.__str__(self), text) + + def set_instantiated_flags(self, flags=0): + """Set or clear NC replica instantiated flags""" + self.rep_instantiated_flags = flags + + def identify_by_dsa_attr(self, samdb, attr): + """Given an NC which has been discovered thru the + nTDSDSA database object, determine what type of NC + replica it is (i.e. partial, read only, default) + + :param attr: attr of nTDSDSA object where NC DN appears + """ + # If the NC was found under hasPartialReplicaNCs + # then a partial replica at this dsa + if attr == "hasPartialReplicaNCs": + self.rep_partial = True + self.rep_present_criteria_one = True + + # If the NC is listed under msDS-HasDomainNCs then + # this can only be a domain NC and it is the DSA's + # default domain NC + elif attr == "msDS-HasDomainNCs": + self.rep_default = True + + # NCs listed under hasMasterNCs are either + # default domain, schema, or config. We check + # against schema and config because they will be + # the same for all nTDSDSAs in the forest. 
That + # leaves the default domain NC remaining which + # may be different for each nTDSDSAs (and thus + # we don't compare against this samdb's default + # basedn + elif attr == "hasMasterNCs": + self.rep_present_criteria_one = True + + if self.nc_dnstr != str(samdb.get_schema_basedn()) and \ + self.nc_dnstr != str(samdb.get_config_basedn()): + self.rep_default = True + + # RODC only + elif attr == "msDS-hasFullReplicaNCs": + self.rep_present_criteria_one = True + self.rep_ro = True + + # Not RODC + elif attr == "msDS-hasMasterNCs": + self.rep_present_criteria_one = True + self.rep_ro = False + + # Now use this DSA attribute to identify the naming + # context type by calling the super class method + # of the same name + NamingContext.identify_by_dsa_attr(self, samdb, attr) + + def is_default(self): + """Whether this is a default domain for the dsa that this NC appears on + """ + return self.rep_default + + def is_ro(self): + """Return True if NC replica is read only""" + return self.rep_ro + + def is_partial(self): + """Return True if NC replica is partial""" + return self.rep_partial + + def is_present(self): + """Given an NC replica which has been discovered thru the + nTDSDSA database object and populated with replica flags + from the msDS-HasInstantiatedNCs; return whether the NC + replica is present (true) or if the IT_NC_GOING flag is + set then the NC replica is not present (false) + """ + if self.rep_present_criteria_one and \ + self.rep_instantiated_flags & dsdb.INSTANCE_TYPE_NC_GOING == 0: + return True + return False + + def load_repsFrom(self, samdb): + """Given an NC replica which has been discovered thru the nTDSDSA + database object, load the repsFrom attribute for the local replica. + held by my dsa. The repsFrom attribute is not replicated so this + attribute is relative only to the local DSA that the samdb exists on + """ + try: + res = samdb.search(base=self.nc_dnstr, scope=ldb.SCOPE_BASE, + attrs=["repsFrom"]) + + except ldb.LdbError as e1: + (enum, estr) = e1.args + raise KCCError("Unable to find NC for (%s) - (%s)" % + (self.nc_dnstr, estr)) + + msg = res[0] + + # Possibly no repsFrom if this is a singleton DC + if "repsFrom" in msg: + for value in msg["repsFrom"]: + try: + unpacked = ndr_unpack(drsblobs.repsFromToBlob, value) + except RuntimeError as e: + print("bad repsFrom NDR: %r" % (value), + file=sys.stderr) + continue + rep = RepsFromTo(self.nc_dnstr, unpacked) + self.rep_repsFrom.append(rep) + + def commit_repsFrom(self, samdb, ro=False): + """Commit repsFrom to the database""" + + # XXX - This is not truly correct according to the MS-TECH + # docs. To commit a repsFrom we should be using RPCs + # IDL_DRSReplicaAdd, IDL_DRSReplicaModify, and + # IDL_DRSReplicaDel to affect a repsFrom change. + # + # Those RPCs are missing in samba, so I'll have to + # implement them to get this to more accurately + # reflect the reference docs. As of right now this + # commit to the database will work as its what the + # older KCC also did + modify = False + newreps = [] + delreps = [] + + for repsFrom in self.rep_repsFrom: + + # Leave out any to be deleted from + # replacement list. 
Build a list + # of to be deleted reps which we will + # remove from rep_repsFrom list below + if repsFrom.to_be_deleted: + delreps.append(repsFrom) + modify = True + continue + + if repsFrom.is_modified(): + repsFrom.set_unmodified() + modify = True + + # current (unmodified) elements also get + # appended here but no changes will occur + # unless something is "to be modified" or + # "to be deleted" + newreps.append(ndr_pack(repsFrom.ndr_blob)) + + # Now delete these from our list of rep_repsFrom + for repsFrom in delreps: + self.rep_repsFrom.remove(repsFrom) + delreps = [] + + # Nothing to do if no reps have been modified or + # need to be deleted or input option has informed + # us to be "readonly" (ro). Leave database + # record "as is" + if not modify or ro: + return + + m = ldb.Message() + m.dn = ldb.Dn(samdb, self.nc_dnstr) + + m["repsFrom"] = \ + ldb.MessageElement(newreps, ldb.FLAG_MOD_REPLACE, "repsFrom") + + try: + samdb.modify(m) + + except ldb.LdbError as estr: + raise KCCError("Could not set repsFrom for (%s) - (%s)" % + (self.nc_dnstr, estr)) + + def load_replUpToDateVector(self, samdb): + """Given an NC replica which has been discovered thru the nTDSDSA + database object, load the replUpToDateVector attribute for the + local replica. held by my dsa. The replUpToDateVector + attribute is not replicated so this attribute is relative only + to the local DSA that the samdb exists on + + """ + try: + res = samdb.search(base=self.nc_dnstr, scope=ldb.SCOPE_BASE, + attrs=["replUpToDateVector"]) + + except ldb.LdbError as e2: + (enum, estr) = e2.args + raise KCCError("Unable to find NC for (%s) - (%s)" % + (self.nc_dnstr, estr)) + + msg = res[0] + + # Possibly no replUpToDateVector if this is a singleton DC + if "replUpToDateVector" in msg: + value = msg["replUpToDateVector"][0] + blob = ndr_unpack(drsblobs.replUpToDateVectorBlob, + value) + if blob.version != 2: + # Samba only generates version 2, and this runs locally + raise AttributeError("Unexpected replUpToDateVector version %d" + % blob.version) + + self.rep_replUpToDateVector_cursors = blob.ctr.cursors + else: + self.rep_replUpToDateVector_cursors = [] + + def dumpstr_to_be_deleted(self): + return '\n'.join(str(x) for x in self.rep_repsFrom if x.to_be_deleted) + + def dumpstr_to_be_modified(self): + return '\n'.join(str(x) for x in self.rep_repsFrom if x.is_modified()) + + def load_fsmo_roles(self, samdb): + """Given an NC replica which has been discovered thru the nTDSDSA + database object, load the fSMORoleOwner attribute. + """ + try: + res = samdb.search(base=self.nc_dnstr, scope=ldb.SCOPE_BASE, + attrs=["fSMORoleOwner"]) + + except ldb.LdbError as e3: + (enum, estr) = e3.args + raise KCCError("Unable to find NC for (%s) - (%s)" % + (self.nc_dnstr, estr)) + + msg = res[0] + + # Possibly no fSMORoleOwner + if "fSMORoleOwner" in msg: + self.rep_fsmo_role_owner = msg["fSMORoleOwner"] + + def is_fsmo_role_owner(self, dsa_dnstr): + if self.rep_fsmo_role_owner is not None and \ + self.rep_fsmo_role_owner == dsa_dnstr: + return True + return False + + def load_repsTo(self, samdb): + """Given an NC replica which has been discovered thru the nTDSDSA + database object, load the repsTo attribute for the local replica. + held by my dsa. The repsTo attribute is not replicated so this + attribute is relative only to the local DSA that the samdb exists on + + This is responsible for push replication, not scheduled pull + replication. Not to be confused for repsFrom. 
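+
+        For example (an illustrative sketch -- the attribute may simply
+        be absent), each raw value is a packed drsblobs.repsFromToBlob:
+
+            for value in msg.get("repsTo", []):
+                blob = ndr_unpack(drsblobs.repsFromToBlob, value)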
+ """ + try: + res = samdb.search(base=self.nc_dnstr, scope=ldb.SCOPE_BASE, + attrs=["repsTo"]) + + except ldb.LdbError as e4: + (enum, estr) = e4.args + raise KCCError("Unable to find NC for (%s) - (%s)" % + (self.nc_dnstr, estr)) + + msg = res[0] + + # Possibly no repsTo if this is a singleton DC + if "repsTo" in msg: + for value in msg["repsTo"]: + try: + unpacked = ndr_unpack(drsblobs.repsFromToBlob, value) + except RuntimeError as e: + print("bad repsTo NDR: %r" % (value), + file=sys.stderr) + continue + rep = RepsFromTo(self.nc_dnstr, unpacked) + self.rep_repsTo.append(rep) + + def commit_repsTo(self, samdb, ro=False): + """Commit repsTo to the database""" + + # XXX - This is not truly correct according to the MS-TECH + # docs. To commit a repsTo we should be using RPCs + # IDL_DRSReplicaAdd, IDL_DRSReplicaModify, and + # IDL_DRSReplicaDel to affect a repsTo change. + # + # Those RPCs are missing in samba, so I'll have to + # implement them to get this to more accurately + # reflect the reference docs. As of right now this + # commit to the database will work as its what the + # older KCC also did + modify = False + newreps = [] + delreps = [] + + for repsTo in self.rep_repsTo: + + # Leave out any to be deleted from + # replacement list. Build a list + # of to be deleted reps which we will + # remove from rep_repsTo list below + if repsTo.to_be_deleted: + delreps.append(repsTo) + modify = True + continue + + if repsTo.is_modified(): + repsTo.set_unmodified() + modify = True + + # current (unmodified) elements also get + # appended here but no changes will occur + # unless something is "to be modified" or + # "to be deleted" + newreps.append(ndr_pack(repsTo.ndr_blob)) + + # Now delete these from our list of rep_repsTo + for repsTo in delreps: + self.rep_repsTo.remove(repsTo) + delreps = [] + + # Nothing to do if no reps have been modified or + # need to be deleted or input option has informed + # us to be "readonly" (ro). Leave database + # record "as is" + if not modify or ro: + return + + m = ldb.Message() + m.dn = ldb.Dn(samdb, self.nc_dnstr) + + m["repsTo"] = \ + ldb.MessageElement(newreps, ldb.FLAG_MOD_REPLACE, "repsTo") + + try: + samdb.modify(m) + + except ldb.LdbError as estr: + raise KCCError("Could not set repsTo for (%s) - (%s)" % + (self.nc_dnstr, estr)) + + +class DirectoryServiceAgent(object): + + def __init__(self, dsa_dnstr): + """Initialize DSA class. + + Class is subsequently fully populated by calling the load_dsa() method + + :param dsa_dnstr: DN of the nTDSDSA + """ + self.dsa_dnstr = dsa_dnstr + self.dsa_guid = None + self.dsa_ivid = None + self.dsa_is_ro = False + self.dsa_is_istg = False + self.options = 0 + self.dsa_behavior = 0 + self.default_dnstr = None # default domain dn string for dsa + + # NCReplicas for this dsa that are "present" + # Indexed by DN string of naming context + self.current_rep_table = {} + + # NCReplicas for this dsa that "should be present" + # Indexed by DN string of naming context + self.needed_rep_table = {} + + # NTDSConnections for this dsa. These are current + # valid connections that are committed or pending a commit + # in the database. 
Indexed by DN string of connection
+        self.connect_table = {}
+
+    def __str__(self):
+        """Debug dump string output of class"""
+
+        text = "%s:" % self.__class__.__name__
+        if self.dsa_dnstr is not None:
+            text = text + "\n\tdsa_dnstr=%s" % self.dsa_dnstr
+        if self.dsa_guid is not None:
+            text = text + "\n\tdsa_guid=%s" % str(self.dsa_guid)
+        if self.dsa_ivid is not None:
+            text = text + "\n\tdsa_ivid=%s" % str(self.dsa_ivid)
+
+        text += "\n\tro=%s" % self.is_ro() +\
+                "\n\tgc=%s" % self.is_gc() +\
+                "\n\tistg=%s" % self.is_istg() +\
+                "\ncurrent_replica_table:" +\
+                "\n%s" % self.dumpstr_current_replica_table() +\
+                "\nneeded_replica_table:" +\
+                "\n%s" % self.dumpstr_needed_replica_table() +\
+                "\nconnect_table:" +\
+                "\n%s" % self.dumpstr_connect_table()
+
+        return text
+
+    def get_current_replica(self, nc_dnstr):
+        return self.current_rep_table.get(nc_dnstr)
+
+    def is_istg(self):
+        """Returns True if dsa is the intersite topology generator for
+        its site"""
+        # The KCC on an RODC always acts as an ISTG for itself
+        return self.dsa_is_istg or self.dsa_is_ro
+
+    def is_ro(self):
+        """Returns True if dsa is a read-only domain controller"""
+        return self.dsa_is_ro
+
+    def is_gc(self):
+        """Returns True if dsa hosts a global catalog"""
+        if (self.options & dsdb.DS_NTDSDSA_OPT_IS_GC) != 0:
+            return True
+        return False
+
+    def is_minimum_behavior(self, version):
+        """Returns True if the dsa's Windows behavior level is greater
+        than or equal to (version)
+
+        :param version: Windows version to test against
+            (e.g. DS_DOMAIN_FUNCTION_2008)
+        """
+        if self.dsa_behavior >= version:
+            return True
+        return False
+
+    def is_translate_ntdsconn_disabled(self):
+        """Whether NTDSConnection translation is disabled in this DSA's
+        options."""
+        if (self.options & dsdb.DS_NTDSDSA_OPT_DISABLE_NTDSCONN_XLATE) != 0:
+            return True
+        return False
+
+    def get_rep_tables(self):
+        """Return DSA current and needed replica tables
+        """
+        return self.current_rep_table, self.needed_rep_table
+
+    def get_parent_dnstr(self):
+        """Get the parent DN string of this object."""
+        head, sep, tail = self.dsa_dnstr.partition(',')
+        return tail
+
+    def load_dsa(self, samdb):
+        """Load a DSA from the samdb.
+
+        Prior initialization has given us the DN of the DSA that we are to
+        load.  This method initializes all other attributes, including loading
+        the NC replica table for this DSA.
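+
+        A typical (illustrative) call sequence:
+
+            dsa = DirectoryServiceAgent(ntds_dnstr)  # hypothetical nTDSDSA DN
+            dsa.load_dsa(samdb)
+            current, needed = dsa.get_rep_tables()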
+ """ + attrs = ["objectGUID", + "invocationID", + "options", + "msDS-isRODC", + "msDS-Behavior-Version"] + try: + res = samdb.search(base=self.dsa_dnstr, scope=ldb.SCOPE_BASE, + attrs=attrs) + + except ldb.LdbError as e5: + (enum, estr) = e5.args + raise KCCError("Unable to find nTDSDSA for (%s) - (%s)" % + (self.dsa_dnstr, estr)) + + msg = res[0] + self.dsa_guid = misc.GUID(samdb.schema_format_value("objectGUID", + msg["objectGUID"][0])) + + # RODCs don't originate changes and thus have no invocationId, + # therefore we must check for existence first + if "invocationId" in msg: + self.dsa_ivid = misc.GUID(samdb.schema_format_value("objectGUID", + msg["invocationId"][0])) + + if "options" in msg: + self.options = int(msg["options"][0]) + + if "msDS-isRODC" in msg and str(msg["msDS-isRODC"][0]) == "TRUE": + self.dsa_is_ro = True + else: + self.dsa_is_ro = False + + if "msDS-Behavior-Version" in msg: + self.dsa_behavior = int(msg['msDS-Behavior-Version'][0]) + + # Load the NC replicas that are enumerated on this dsa + self.load_current_replica_table(samdb) + + # Load the nTDSConnection that are enumerated on this dsa + self.load_connection_table(samdb) + + def load_current_replica_table(self, samdb): + """Method to load the NC replica's listed for DSA object. + + This method queries the samdb for (hasMasterNCs, msDS-hasMasterNCs, + hasPartialReplicaNCs, msDS-HasDomainNCs, msDS-hasFullReplicaNCs, and + msDS-HasInstantiatedNCs) to determine complete list of NC replicas that + are enumerated for the DSA. Once a NC replica is loaded it is + identified (schema, config, etc) and the other replica attributes + (partial, ro, etc) are determined. + + :param samdb: database to query for DSA replica list + """ + ncattrs = [ + # not RODC - default, config, schema (old style) + "hasMasterNCs", + # not RODC - default, config, schema, app NCs + "msDS-hasMasterNCs", + # domain NC partial replicas + "hasPartialReplicaNCs", + # default domain NC + "msDS-HasDomainNCs", + # RODC only - default, config, schema, app NCs + "msDS-hasFullReplicaNCs", + # Identifies if replica is coming, going, or stable + "msDS-HasInstantiatedNCs" + ] + try: + res = samdb.search(base=self.dsa_dnstr, scope=ldb.SCOPE_BASE, + attrs=ncattrs) + + except ldb.LdbError as e6: + (enum, estr) = e6.args + raise KCCError("Unable to find nTDSDSA NCs for (%s) - (%s)" % + (self.dsa_dnstr, estr)) + + # The table of NCs for the dsa we are searching + tmp_table = {} + + # We should get one response to our query here for + # the ntds that we requested + if len(res[0]) > 0: + + # Our response will contain a number of elements including + # the dn of the dsa as well as elements for each + # attribute (e.g. hasMasterNCs). Each of these elements + # is a dictionary list which we retrieve the keys for and + # then iterate over them + for k in res[0].keys(): + if k == "dn": + continue + + # For each attribute type there will be one or more DNs + # listed. For instance DCs normally have 3 hasMasterNCs + # listed. 
+ for value in res[0][k]: + # Turn dn into a dsdb_Dn so we can use + # its methods to parse a binary DN + dsdn = dsdb_Dn(samdb, value.decode('utf8')) + flags = dsdn.get_binary_integer() + dnstr = str(dsdn.dn) + + if dnstr not in tmp_table: + rep = NCReplica(self, dnstr) + tmp_table[dnstr] = rep + else: + rep = tmp_table[dnstr] + + if k == "msDS-HasInstantiatedNCs": + rep.set_instantiated_flags(flags) + continue + + rep.identify_by_dsa_attr(samdb, k) + + # if we've identified the default domain NC + # then save its DN string + if rep.is_default(): + self.default_dnstr = dnstr + else: + raise KCCError("No nTDSDSA NCs for (%s)" % self.dsa_dnstr) + + # Assign our newly built NC replica table to this dsa + self.current_rep_table = tmp_table + + def add_needed_replica(self, rep): + """Method to add a NC replica that "should be present" to the + needed_rep_table. + """ + self.needed_rep_table[rep.nc_dnstr] = rep + + def load_connection_table(self, samdb): + """Method to load the nTDSConnections listed for DSA object. + + :param samdb: database to query for DSA connection list + """ + try: + res = samdb.search(base=self.dsa_dnstr, + scope=ldb.SCOPE_SUBTREE, + expression="(objectClass=nTDSConnection)") + + except ldb.LdbError as e7: + (enum, estr) = e7.args + raise KCCError("Unable to find nTDSConnection for (%s) - (%s)" % + (self.dsa_dnstr, estr)) + + for msg in res: + dnstr = str(msg.dn) + + # already loaded + if dnstr in self.connect_table: + continue + + connect = NTDSConnection(dnstr) + + connect.load_connection(samdb) + self.connect_table[dnstr] = connect + + def commit_connections(self, samdb, ro=False): + """Method to commit any uncommitted nTDSConnections + modifications that are in our table. These would be + identified connections that are marked to be added or + deleted + + :param samdb: database to commit DSA connection list to + :param ro: if (true) then perform internal operations but + do not write to the database (readonly) + """ + delconn = [] + + for dnstr, connect in self.connect_table.items(): + if connect.to_be_added: + connect.commit_added(samdb, ro) + + if connect.to_be_modified: + connect.commit_modified(samdb, ro) + + if connect.to_be_deleted: + connect.commit_deleted(samdb, ro) + delconn.append(dnstr) + + # Now delete the connection from the table + for dnstr in delconn: + del self.connect_table[dnstr] + + def add_connection(self, dnstr, connect): + assert dnstr not in self.connect_table + self.connect_table[dnstr] = connect + + def get_connection_by_from_dnstr(self, from_dnstr): + """Scan DSA nTDSConnection table and return connection + with a "fromServer" dn string equivalent to method + input parameter. + + :param from_dnstr: search for this from server entry + """ + answer = [] + for connect in self.connect_table.values(): + if connect.get_from_dnstr() == from_dnstr: + answer.append(connect) + + return answer + + def dumpstr_current_replica_table(self): + """Debug dump string output of current replica table""" + return '\n'.join(str(x) for x in self.current_rep_table) + + def dumpstr_needed_replica_table(self): + """Debug dump string output of needed replica table""" + return '\n'.join(str(x) for x in self.needed_rep_table) + + def dumpstr_connect_table(self): + """Debug dump string output of connect table""" + return '\n'.join(str(x) for x in self.connect_table) + + def new_connection(self, options, system_flags, transport, from_dnstr, + sched): + """Set up a new connection for the DSA based on input + parameters. 
Connection will be added to the DSA + connect_table and will be marked as "to be added" pending + a call to commit_connections() + """ + dnstr = "CN=%s," % str(uuid.uuid4()) + self.dsa_dnstr + + connect = NTDSConnection(dnstr) + connect.to_be_added = True + connect.enabled = True + connect.from_dnstr = from_dnstr + connect.options = options + connect.system_flags = system_flags + + if transport is not None: + connect.transport_dnstr = transport.dnstr + connect.transport_guid = transport.guid + + if sched is not None: + connect.schedule = sched + else: + # Create schedule. Attribute value set according to MS-TECH + # intra-site connection creation document + connect.schedule = new_connection_schedule() + + self.add_connection(dnstr, connect) + return connect + + +class NTDSConnection(object): + """Class defines a nTDSConnection found under a DSA + """ + def __init__(self, dnstr): + self.dnstr = dnstr + self.guid = None + self.enabled = False + self.whenCreated = 0 + self.to_be_added = False # new connection needs to be added + self.to_be_deleted = False # old connection needs to be deleted + self.to_be_modified = False + self.options = 0 + self.system_flags = 0 + self.transport_dnstr = None + self.transport_guid = None + self.from_dnstr = None + self.schedule = None + + def __str__(self): + """Debug dump string output of NTDSConnection object""" + + text = "%s:\n\tdn=%s" % (self.__class__.__name__, self.dnstr) +\ + "\n\tenabled=%s" % self.enabled +\ + "\n\tto_be_added=%s" % self.to_be_added +\ + "\n\tto_be_deleted=%s" % self.to_be_deleted +\ + "\n\tto_be_modified=%s" % self.to_be_modified +\ + "\n\toptions=0x%08X" % self.options +\ + "\n\tsystem_flags=0x%08X" % self.system_flags +\ + "\n\twhenCreated=%d" % self.whenCreated +\ + "\n\ttransport_dn=%s" % self.transport_dnstr + + if self.guid is not None: + text += "\n\tguid=%s" % str(self.guid) + + if self.transport_guid is not None: + text += "\n\ttransport_guid=%s" % str(self.transport_guid) + + text = text + "\n\tfrom_dn=%s" % self.from_dnstr + + if self.schedule is not None: + text += "\n\tschedule.size=%s" % self.schedule.size +\ + "\n\tschedule.bandwidth=%s" % self.schedule.bandwidth +\ + ("\n\tschedule.numberOfSchedules=%s" % + self.schedule.numberOfSchedules) + + for i, header in enumerate(self.schedule.headerArray): + text += ("\n\tschedule.headerArray[%d].type=%d" % + (i, header.type)) +\ + ("\n\tschedule.headerArray[%d].offset=%d" % + (i, header.offset)) +\ + "\n\tschedule.dataArray[%d].slots[ " % i +\ + "".join("0x%X " % slot for slot in self.schedule.dataArray[i].slots) +\ + "]" + + return text + + def load_connection(self, samdb): + """Given a NTDSConnection object with an prior initialization + for the object's DN, search for the DN and load attributes + from the samdb. 
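+
+        An illustrative sketch (the DN string is hypothetical):
+
+            connect = NTDSConnection(connection_dnstr)
+            connect.load_connection(samdb)
+            if connect.is_enabled() and not connect.is_rodc_topology():
+                schedule_ok = connect.is_schedule_minimum_once_per_week()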
+ """ + attrs = ["options", + "enabledConnection", + "schedule", + "whenCreated", + "objectGUID", + "transportType", + "fromServer", + "systemFlags"] + try: + res = samdb.search(base=self.dnstr, scope=ldb.SCOPE_BASE, + attrs=attrs) + + except ldb.LdbError as e8: + (enum, estr) = e8.args + raise KCCError("Unable to find nTDSConnection for (%s) - (%s)" % + (self.dnstr, estr)) + + msg = res[0] + + if "options" in msg: + self.options = int(msg["options"][0]) + + if "enabledConnection" in msg: + if str(msg["enabledConnection"][0]).upper().lstrip().rstrip() == "TRUE": + self.enabled = True + + if "systemFlags" in msg: + self.system_flags = int(msg["systemFlags"][0]) + + try: + self.guid = \ + misc.GUID(samdb.schema_format_value("objectGUID", + msg["objectGUID"][0])) + except KeyError: + raise KCCError("Unable to find objectGUID in nTDSConnection " + "for (%s)" % (self.dnstr)) + + if "transportType" in msg: + dsdn = dsdb_Dn(samdb, msg["transportType"][0].decode('utf8')) + self.load_connection_transport(samdb, str(dsdn.dn)) + + if "schedule" in msg: + self.schedule = ndr_unpack(drsblobs.schedule, msg["schedule"][0]) + + if "whenCreated" in msg: + self.whenCreated = ldb.string_to_time(str(msg["whenCreated"][0])) + + if "fromServer" in msg: + dsdn = dsdb_Dn(samdb, msg["fromServer"][0].decode('utf8')) + self.from_dnstr = str(dsdn.dn) + assert self.from_dnstr is not None + + def load_connection_transport(self, samdb, tdnstr): + """Given a NTDSConnection object which enumerates a transport + DN, load the transport information for the connection object + + :param tdnstr: transport DN to load + """ + attrs = ["objectGUID"] + try: + res = samdb.search(base=tdnstr, + scope=ldb.SCOPE_BASE, attrs=attrs) + + except ldb.LdbError as e9: + (enum, estr) = e9.args + raise KCCError("Unable to find transport (%s) - (%s)" % + (tdnstr, estr)) + + if "objectGUID" in res[0]: + msg = res[0] + self.transport_dnstr = tdnstr + self.transport_guid = \ + misc.GUID(samdb.schema_format_value("objectGUID", + msg["objectGUID"][0])) + assert self.transport_dnstr is not None + assert self.transport_guid is not None + + def commit_deleted(self, samdb, ro=False): + """Local helper routine for commit_connections() which + handles committed connections that are to be deleted from + the database database + """ + assert self.to_be_deleted + self.to_be_deleted = False + + # No database modification requested + if ro: + return + + try: + samdb.delete(self.dnstr) + except ldb.LdbError as e10: + (enum, estr) = e10.args + raise KCCError("Could not delete nTDSConnection for (%s) - (%s)" % + (self.dnstr, estr)) + + def commit_added(self, samdb, ro=False): + """Local helper routine for commit_connections() which + handles committed connections that are to be added to the + database + """ + assert self.to_be_added + self.to_be_added = False + + # No database modification requested + if ro: + return + + # First verify we don't have this entry to ensure nothing + # is programmatically amiss + found = False + try: + msg = samdb.search(base=self.dnstr, scope=ldb.SCOPE_BASE) + if len(msg) != 0: + found = True + + except ldb.LdbError as e11: + (enum, estr) = e11.args + if enum != ldb.ERR_NO_SUCH_OBJECT: + raise KCCError("Unable to search for (%s) - (%s)" % + (self.dnstr, estr)) + if found: + raise KCCError("nTDSConnection for (%s) already exists!" 
% + self.dnstr) + + if self.enabled: + enablestr = "TRUE" + else: + enablestr = "FALSE" + + # Prepare a message for adding to the samdb + m = ldb.Message() + m.dn = ldb.Dn(samdb, self.dnstr) + + m["objectClass"] = \ + ldb.MessageElement("nTDSConnection", ldb.FLAG_MOD_ADD, + "objectClass") + m["showInAdvancedViewOnly"] = \ + ldb.MessageElement("TRUE", ldb.FLAG_MOD_ADD, + "showInAdvancedViewOnly") + m["enabledConnection"] = \ + ldb.MessageElement(enablestr, ldb.FLAG_MOD_ADD, + "enabledConnection") + m["fromServer"] = \ + ldb.MessageElement(self.from_dnstr, ldb.FLAG_MOD_ADD, "fromServer") + m["options"] = \ + ldb.MessageElement(str(self.options), ldb.FLAG_MOD_ADD, "options") + m["systemFlags"] = \ + ldb.MessageElement(str(self.system_flags), ldb.FLAG_MOD_ADD, + "systemFlags") + + if self.transport_dnstr is not None: + m["transportType"] = \ + ldb.MessageElement(str(self.transport_dnstr), ldb.FLAG_MOD_ADD, + "transportType") + + if self.schedule is not None: + m["schedule"] = \ + ldb.MessageElement(ndr_pack(self.schedule), + ldb.FLAG_MOD_ADD, "schedule") + try: + samdb.add(m) + except ldb.LdbError as e12: + (enum, estr) = e12.args + raise KCCError("Could not add nTDSConnection for (%s) - (%s)" % + (self.dnstr, estr)) + + def commit_modified(self, samdb, ro=False): + """Local helper routine for commit_connections() which + handles committed connections that are to be modified to the + database + """ + assert self.to_be_modified + self.to_be_modified = False + + # No database modification requested + if ro: + return + + # First verify we have this entry to ensure nothing + # is programmatically amiss + try: + # we don't use the search result, but it tests the status + # of self.dnstr in the database. + samdb.search(base=self.dnstr, scope=ldb.SCOPE_BASE) + + except ldb.LdbError as e13: + (enum, estr) = e13.args + if enum == ldb.ERR_NO_SUCH_OBJECT: + raise KCCError("nTDSConnection for (%s) doesn't exist!" % + self.dnstr) + raise KCCError("Unable to search for (%s) - (%s)" % + (self.dnstr, estr)) + + if self.enabled: + enablestr = "TRUE" + else: + enablestr = "FALSE" + + # Prepare a message for modifying the samdb + m = ldb.Message() + m.dn = ldb.Dn(samdb, self.dnstr) + + m["enabledConnection"] = \ + ldb.MessageElement(enablestr, ldb.FLAG_MOD_REPLACE, + "enabledConnection") + m["fromServer"] = \ + ldb.MessageElement(self.from_dnstr, ldb.FLAG_MOD_REPLACE, + "fromServer") + m["options"] = \ + ldb.MessageElement(str(self.options), ldb.FLAG_MOD_REPLACE, + "options") + m["systemFlags"] = \ + ldb.MessageElement(str(self.system_flags), ldb.FLAG_MOD_REPLACE, + "systemFlags") + + if self.transport_dnstr is not None: + m["transportType"] = \ + ldb.MessageElement(str(self.transport_dnstr), + ldb.FLAG_MOD_REPLACE, "transportType") + else: + m["transportType"] = \ + ldb.MessageElement([], ldb.FLAG_MOD_DELETE, "transportType") + + if self.schedule is not None: + m["schedule"] = \ + ldb.MessageElement(ndr_pack(self.schedule), + ldb.FLAG_MOD_REPLACE, "schedule") + else: + m["schedule"] = \ + ldb.MessageElement([], ldb.FLAG_MOD_DELETE, "schedule") + try: + samdb.modify(m) + except ldb.LdbError as e14: + (enum, estr) = e14.args + raise KCCError("Could not modify nTDSConnection for (%s) - (%s)" % + (self.dnstr, estr)) + + def set_modified(self, truefalse): + self.to_be_modified = truefalse + + def is_schedule_minimum_once_per_week(self): + """Returns True if our schedule includes at least one + replication interval within the week. 
False otherwise + """ + # replinfo schedule is None means "always", while + # NTDSConnection schedule is None means "never". + if self.schedule is None or self.schedule.dataArray[0] is None: + return False + + for slot in self.schedule.dataArray[0].slots: + if (slot & 0x0F) != 0x0: + return True + return False + + def is_equivalent_schedule(self, sched): + """Returns True if our schedule is equivalent to the input + comparison schedule. + + :param shed: schedule to compare to + """ + # There are 4 cases, where either self.schedule or sched can be None + # + # | self. is None | self. is not None + # --------------+-----------------+-------------------- + # sched is None | True | False + # --------------+-----------------+-------------------- + # sched is not None | False | do calculations + + if self.schedule is None: + return sched is None + + if sched is None: + return False + + if ((self.schedule.size != sched.size or + self.schedule.bandwidth != sched.bandwidth or + self.schedule.numberOfSchedules != sched.numberOfSchedules)): + return False + + for i, header in enumerate(self.schedule.headerArray): + + if self.schedule.headerArray[i].type != sched.headerArray[i].type: + return False + + if self.schedule.headerArray[i].offset != \ + sched.headerArray[i].offset: + return False + + for a, b in zip(self.schedule.dataArray[i].slots, + sched.dataArray[i].slots): + if a != b: + return False + return True + + def is_rodc_topology(self): + """Returns True if NTDS Connection specifies RODC + topology only + """ + if self.options & dsdb.NTDSCONN_OPT_RODC_TOPOLOGY == 0: + return False + return True + + def is_generated(self): + """Returns True if NTDS Connection was generated by the + KCC topology algorithm as opposed to set by the administrator + """ + if self.options & dsdb.NTDSCONN_OPT_IS_GENERATED == 0: + return False + return True + + def is_override_notify_default(self): + """Returns True if NTDS Connection should override notify default + """ + if self.options & dsdb.NTDSCONN_OPT_OVERRIDE_NOTIFY_DEFAULT == 0: + return False + return True + + def is_use_notify(self): + """Returns True if NTDS Connection should use notify + """ + if self.options & dsdb.NTDSCONN_OPT_USE_NOTIFY == 0: + return False + return True + + def is_twoway_sync(self): + """Returns True if NTDS Connection should use twoway sync + """ + if self.options & dsdb.NTDSCONN_OPT_TWOWAY_SYNC == 0: + return False + return True + + def is_intersite_compression_disabled(self): + """Returns True if NTDS Connection intersite compression + is disabled + """ + if self.options & dsdb.NTDSCONN_OPT_DISABLE_INTERSITE_COMPRESSION == 0: + return False + return True + + def is_user_owned_schedule(self): + """Returns True if NTDS Connection has a user owned schedule + """ + if self.options & dsdb.NTDSCONN_OPT_USER_OWNED_SCHEDULE == 0: + return False + return True + + def is_enabled(self): + """Returns True if NTDS Connection is enabled + """ + return self.enabled + + def get_from_dnstr(self): + """Return fromServer dn string attribute""" + return self.from_dnstr + + +class Partition(NamingContext): + """A naming context discovered thru Partitions DN of the config schema. 
+
+    This is a more specific form of the NamingContext class (inheriting from
+    that class) and it identifies unique attributes enumerated in the
+    Partitions, such as which nTDSDSAs are cross referenced for replicas.
+    """
+    def __init__(self, partstr):
+        self.partstr = partstr
+        self.enabled = True
+        self.system_flags = 0
+        self.rw_location_list = []
+        self.ro_location_list = []
+
+        # We don't have enough info to properly
+        # fill in the naming context yet.  We'll get that
+        # fully set up with load_partition().
+        NamingContext.__init__(self, None)
+
+    def load_partition(self, samdb):
+        """Given a Partition class object that has been initialized with its
+        partition dn string, load the partition from the sam database, identify
+        the type of the partition (schema, domain, etc.) and record the list of
+        nTDSDSAs that appear in the cross reference attributes
+        msDS-NC-Replica-Locations and msDS-NC-RO-Replica-Locations.
+
+        :param samdb: sam database to load partition from
+        """
+        attrs = ["nCName",
+                 "Enabled",
+                 "systemFlags",
+                 "msDS-NC-Replica-Locations",
+                 "msDS-NC-RO-Replica-Locations"]
+        try:
+            res = samdb.search(base=self.partstr, scope=ldb.SCOPE_BASE,
+                               attrs=attrs)
+
+        except ldb.LdbError as e15:
+            (enum, estr) = e15.args
+            raise KCCError("Unable to find partition for (%s) - (%s)" %
+                           (self.partstr, estr))
+        msg = res[0]
+        for k in msg.keys():
+            if k == "dn":
+                continue
+
+            if k == "Enabled":
+                if str(msg[k][0]).upper().lstrip().rstrip() == "TRUE":
+                    self.enabled = True
+                else:
+                    self.enabled = False
+                continue
+
+            if k == "systemFlags":
+                self.system_flags = int(msg[k][0])
+                continue
+
+            for value in msg[k]:
+                dsdn = dsdb_Dn(samdb, value.decode('utf8'))
+                dnstr = str(dsdn.dn)
+
+                if k == "nCName":
+                    self.nc_dnstr = dnstr
+                    continue
+
+                if k == "msDS-NC-Replica-Locations":
+                    self.rw_location_list.append(dnstr)
+                    continue
+
+                if k == "msDS-NC-RO-Replica-Locations":
+                    self.ro_location_list.append(dnstr)
+                    continue
+
+        # Now identify what type of NC this partition
+        # enumerated
+        self.identify_by_basedn(samdb)
+
+    def is_enabled(self):
+        """Returns True if partition is enabled
+        """
+        return self.enabled
+
+    def is_foreign(self):
+        """Returns True if this is not an Active Directory NC in our
+        forest but is instead something else (e.g. a foreign NC)
+        """
+        if (self.system_flags & dsdb.SYSTEM_FLAG_CR_NTDS_NC) == 0:
+            return True
+        else:
+            return False
+
+    def should_be_present(self, target_dsa):
+        """Tests whether this partition should have an NC replica
+        on the target dsa.  This method returns a tuple of
+        needed=True/False, ro=True/False, partial=True/False
+
+        :param target_dsa: DSA to test for an NC replica
+        """
+        ro = False
+        partial = False
+
+        # If this is the config, schema, or default
+        # domain NC for the target dsa then it should
+        # be present
+        needed = (self.nc_type == NCType.config or
+                  self.nc_type == NCType.schema or
+                  (self.nc_type == NCType.domain and
+                   self.nc_dnstr == target_dsa.default_dnstr))
+
+        # A writable replica of an application NC should be present
+        # if a cross reference to the target DSA exists.
Depending + # on whether the DSA is ro we examine which type of cross reference + # to look for (msDS-NC-Replica-Locations or + # msDS-NC-RO-Replica-Locations + if self.nc_type == NCType.application: + if target_dsa.is_ro(): + if target_dsa.dsa_dnstr in self.ro_location_list: + needed = True + else: + if target_dsa.dsa_dnstr in self.rw_location_list: + needed = True + + # If the target dsa is a gc then a partial replica of a + # domain NC (other than the DSAs default domain) should exist + # if there is also a cross reference for the DSA + if (target_dsa.is_gc() and + self.nc_type == NCType.domain and + self.nc_dnstr != target_dsa.default_dnstr and + (target_dsa.dsa_dnstr in self.ro_location_list or + target_dsa.dsa_dnstr in self.rw_location_list)): + needed = True + partial = True + + # partial NCs are always readonly + if needed and (target_dsa.is_ro() or partial): + ro = True + + return needed, ro, partial + + def __str__(self): + """Debug dump string output of class""" + text = "%s" % NamingContext.__str__(self) +\ + "\n\tpartdn=%s" % self.partstr +\ + "".join("\n\tmsDS-NC-Replica-Locations=%s" % k for k in self.rw_location_list) +\ + "".join("\n\tmsDS-NC-RO-Replica-Locations=%s" % k for k in self.ro_location_list) + return text + + +class Site(object): + """An individual site object discovered thru the configuration + naming context. Contains all DSAs that exist within the site + """ + def __init__(self, site_dnstr, nt_now): + self.site_dnstr = site_dnstr + self.site_guid = None + self.site_options = 0 + self.site_topo_generator = None + self.site_topo_failover = 0 # appears to be in minutes + self.dsa_table = {} + self.rw_dsa_table = {} + self.nt_now = nt_now + + def load_site(self, samdb): + """Loads the NTDS Site Settings options attribute for the site + as well as querying and loading all DSAs that appear within + the site. + """ + ssdn = "CN=NTDS Site Settings,%s" % self.site_dnstr + attrs = ["options", + "interSiteTopologyFailover", + "interSiteTopologyGenerator"] + try: + res = samdb.search(base=ssdn, scope=ldb.SCOPE_BASE, + attrs=attrs) + self_res = samdb.search(base=self.site_dnstr, scope=ldb.SCOPE_BASE, + attrs=['objectGUID']) + except ldb.LdbError as e16: + (enum, estr) = e16.args + raise KCCError("Unable to find site settings for (%s) - (%s)" % + (ssdn, estr)) + + msg = res[0] + if "options" in msg: + self.site_options = int(msg["options"][0]) + + if "interSiteTopologyGenerator" in msg: + self.site_topo_generator = \ + str(msg["interSiteTopologyGenerator"][0]) + + if "interSiteTopologyFailover" in msg: + self.site_topo_failover = int(msg["interSiteTopologyFailover"][0]) + + msg = self_res[0] + if "objectGUID" in msg: + self.site_guid = misc.GUID(samdb.schema_format_value("objectGUID", + msg["objectGUID"][0])) + + self.load_all_dsa(samdb) + + def load_all_dsa(self, samdb): + """Discover all nTDSDSA thru the sites entry and + instantiate and load the DSAs. Each dsa is inserted + into the dsa_table by dn string. 
+ """ + try: + res = samdb.search(self.site_dnstr, + scope=ldb.SCOPE_SUBTREE, + expression="(objectClass=nTDSDSA)") + except ldb.LdbError as e17: + (enum, estr) = e17.args + raise KCCError("Unable to find nTDSDSAs - (%s)" % estr) + + for msg in res: + dnstr = str(msg.dn) + + # already loaded + if dnstr in self.dsa_table: + continue + + dsa = DirectoryServiceAgent(dnstr) + + dsa.load_dsa(samdb) + + # Assign this dsa to my dsa table + # and index by dsa dn + self.dsa_table[dnstr] = dsa + if not dsa.is_ro(): + self.rw_dsa_table[dnstr] = dsa + + def get_dsa(self, dnstr): + """Return a previously loaded DSA object by consulting + the sites dsa_table for the provided DSA dn string + + :return: None if DSA doesn't exist + """ + return self.dsa_table.get(dnstr) + + def select_istg(self, samdb, mydsa, ro): + """Determine if my DC should be an intersite topology + generator. If my DC is the istg and is both a writeable + DC and the database is opened in write mode then we perform + an originating update to set the interSiteTopologyGenerator + attribute in the NTDS Site Settings object. An RODC always + acts as an ISTG for itself. + """ + # The KCC on an RODC always acts as an ISTG for itself + if mydsa.dsa_is_ro: + mydsa.dsa_is_istg = True + self.site_topo_generator = mydsa.dsa_dnstr + return True + + c_rep = get_dsa_config_rep(mydsa) + + # Load repsFrom and replUpToDateVector if not already loaded + # so we can get the current state of the config replica and + # whether we are getting updates from the istg + c_rep.load_repsFrom(samdb) + + c_rep.load_replUpToDateVector(samdb) + + # From MS-ADTS 6.2.2.3.1 ISTG selection: + # First, the KCC on a writable DC determines whether it acts + # as an ISTG for its site + # + # Let s be the object such that s!lDAPDisplayName = nTDSDSA + # and classSchema in s!objectClass. + # + # Let D be the sequence of objects o in the site of the local + # DC such that o!objectCategory = s. D is sorted in ascending + # order by objectGUID. + # + # Which is a fancy way of saying "sort all the nTDSDSA objects + # in the site by guid in ascending order". Place sorted list + # in D_sort[] + D_sort = sorted( + self.rw_dsa_table.values(), + key=lambda dsa: ndr_pack(dsa.dsa_guid)) + + # double word number of 100 nanosecond intervals since 1600s + + # Let f be the duration o!interSiteTopologyFailover seconds, or 2 hours + # if o!interSiteTopologyFailover is 0 or has no value. + # + # Note: lastSuccess and ntnow are in 100 nanosecond intervals + # so it appears we have to turn f into the same interval + # + # interSiteTopologyFailover (if set) appears to be in minutes + # so we'll need to convert to seconds and then 100 nanosecond + # intervals + # XXX [MS-ADTS] 6.2.2.3.1 says it is seconds, not minutes. + # + # 10,000,000 is number of 100 nanosecond intervals in a second + if self.site_topo_failover == 0: + f = 2 * 60 * 60 * 10000000 + else: + f = self.site_topo_failover * 60 * 10000000 + + # Let o be the site settings object for the site of the local + # DC, or NULL if no such o exists. 
+        d_dsa = self.dsa_table.get(self.site_topo_generator)
+
+        # From MS-ADTS 6.2.2.3.1 ISTG selection:
+        #     If o != NULL and o!interSiteTopologyGenerator is not the
+        #     nTDSDSA object for the local DC and
+        #     o!interSiteTopologyGenerator is an element dj of sequence D:
+        #
+        if d_dsa is not None and d_dsa is not mydsa:
+            # From MS-ADTS 6.2.2.3.1 ISTG Selection:
+            #     Let c be the cursor in the replUpToDateVector variable
+            #     associated with the NC replica of the config NC such
+            #     that c.uuidDsa = dj!invocationId. If no such c exists
+            #     (No evidence of replication from current ISTG):
+            #         Let i = j.
+            #         Let t = 0.
+            #
+            #     Else if the current time < c.timeLastSyncSuccess - f
+            #     (Evidence of time sync problem on current ISTG):
+            #         Let i = 0.
+            #         Let t = 0.
+            #
+            #     Else (Evidence of replication from current ISTG):
+            #         Let i = j.
+            #         Let t = c.timeLastSyncSuccess.
+            #
+            # last_success appears to be a double word containing
+            # number of 100 nanosecond intervals since the 1600s
+            j_idx = D_sort.index(d_dsa)
+
+            found = False
+            for cursor in c_rep.rep_replUpToDateVector_cursors:
+                if d_dsa.dsa_ivid == cursor.source_dsa_invocation_id:
+                    found = True
+                    break
+
+            if not found:
+                i_idx = j_idx
+                t_time = 0
+
+            # XXX doc says current time < c.timeLastSyncSuccess - f
+            # which is true only if f is negative or clocks are wrong.
+            # f is not negative in the default case (2 hours).
+            elif self.nt_now - cursor.last_sync_success > f:
+                i_idx = 0
+                t_time = 0
+            else:
+                i_idx = j_idx
+                t_time = cursor.last_sync_success
+
+        # Otherwise (Nominate local DC as ISTG):
+        #     Let i be the integer such that di is the nTDSDSA
+        #     object for the local DC.
+        #     Let t = the current time.
+        else:
+            i_idx = D_sort.index(mydsa)
+            t_time = self.nt_now
+
+        # Compute a function that maintains the current ISTG if
+        # it is alive, cycles through other candidates if not.
+        #
+        # Let k be the integer (i + ((current time - t) /
+        #     o!interSiteTopologyFailover)) MOD |D|.
+        #
+        # Note: We don't want to divide by zero here so they must
+        # have meant "f" instead of "o!interSiteTopologyFailover"
+        k_idx = (i_idx + ((self.nt_now - t_time) // f)) % len(D_sort)
+
+        # The local writable DC acts as an ISTG for its site if and
+        # only if dk is the nTDSDSA object for the local DC.  If the
+        # local DC does not act as an ISTG, the KCC skips the
+        # remainder of this task.
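+        # Worked example (illustrative numbers): with |D| = 3 writable DCs
+        # and f = 2 hours, an ISTG whose last config sync was 30 minutes
+        # ago keeps the role, since k = (i + (0.5h // 2h)) MOD 3 = i.
+        # After between 2 and 4 hours of silence the quotient becomes 1
+        # and the nomination cycles to the next DC in ascending GUID order.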
+ d_dsa = D_sort[k_idx] + d_dsa.dsa_is_istg = True + + # Update if we are the ISTG, otherwise return + if d_dsa is not mydsa: + return False + + # Nothing to do + if self.site_topo_generator == mydsa.dsa_dnstr: + return True + + self.site_topo_generator = mydsa.dsa_dnstr + + # If readonly database then do not perform a + # persistent update + if ro: + return True + + # Perform update to the samdb + ssdn = "CN=NTDS Site Settings,%s" % self.site_dnstr + + m = ldb.Message() + m.dn = ldb.Dn(samdb, ssdn) + + m["interSiteTopologyGenerator"] = \ + ldb.MessageElement(mydsa.dsa_dnstr, ldb.FLAG_MOD_REPLACE, + "interSiteTopologyGenerator") + try: + samdb.modify(m) + + except ldb.LdbError as estr: + raise KCCError( + "Could not set interSiteTopologyGenerator for (%s) - (%s)" % + (ssdn, estr)) + return True + + def is_intrasite_topology_disabled(self): + """Returns True if intra-site topology is disabled for site""" + return (self.site_options & + dsdb.DS_NTDSSETTINGS_OPT_IS_AUTO_TOPOLOGY_DISABLED) != 0 + + def is_intersite_topology_disabled(self): + """Returns True if inter-site topology is disabled for site""" + return ((self.site_options & + dsdb.DS_NTDSSETTINGS_OPT_IS_INTER_SITE_AUTO_TOPOLOGY_DISABLED) + != 0) + + def is_random_bridgehead_disabled(self): + """Returns True if selection of random bridgehead is disabled""" + return (self.site_options & + dsdb.DS_NTDSSETTINGS_OPT_IS_RAND_BH_SELECTION_DISABLED) != 0 + + def is_detect_stale_disabled(self): + """Returns True if detect stale is disabled for site""" + return (self.site_options & + dsdb.DS_NTDSSETTINGS_OPT_IS_TOPL_DETECT_STALE_DISABLED) != 0 + + def is_cleanup_ntdsconn_disabled(self): + """Returns True if NTDS Connection cleanup is disabled for site""" + return (self.site_options & + dsdb.DS_NTDSSETTINGS_OPT_IS_TOPL_CLEANUP_DISABLED) != 0 + + def same_site(self, dsa): + """Return True if dsa is in this site""" + if self.get_dsa(dsa.dsa_dnstr): + return True + return False + + def is_rodc_site(self): + if len(self.dsa_table) > 0 and len(self.rw_dsa_table) == 0: + return True + return False + + def __str__(self): + """Debug dump string output of class""" + text = "%s:" % self.__class__.__name__ +\ + "\n\tdn=%s" % self.site_dnstr +\ + "\n\toptions=0x%X" % self.site_options +\ + "\n\ttopo_generator=%s" % self.site_topo_generator +\ + "\n\ttopo_failover=%d" % self.site_topo_failover + for key, dsa in self.dsa_table.items(): + text = text + "\n%s" % dsa + return text + + +class GraphNode(object): + """A graph node describing a set of edges that should be directed to it. + + Each edge is a connection for a particular naming context replica directed + from another node in the forest to this node. 
+ """ + + def __init__(self, dsa_dnstr, max_node_edges): + """Instantiate the graph node according to a DSA dn string + + :param max_node_edges: maximum number of edges that should ever + be directed to the node + """ + self.max_edges = max_node_edges + self.dsa_dnstr = dsa_dnstr + self.edge_from = [] + + def __str__(self): + text = "%s:" % self.__class__.__name__ +\ + "\n\tdsa_dnstr=%s" % self.dsa_dnstr +\ + "\n\tmax_edges=%d" % self.max_edges + + for i, edge in enumerate(self.edge_from): + if isinstance(edge, str): + text += "\n\tedge_from[%d]=%s" % (i, edge) + + return text + + def add_edge_from(self, from_dsa_dnstr): + """Add an edge from the dsa to our graph nodes edge from list + + :param from_dsa_dnstr: the dsa that the edge emanates from + """ + assert isinstance(from_dsa_dnstr, str) + + # No edges from myself to myself + if from_dsa_dnstr == self.dsa_dnstr: + return False + # Only one edge from a particular node + if from_dsa_dnstr in self.edge_from: + return False + # Not too many edges + if len(self.edge_from) >= self.max_edges: + return False + self.edge_from.append(from_dsa_dnstr) + return True + + def add_edges_from_connections(self, dsa): + """For each nTDSConnection object associated with a particular + DSA, we test if it implies an edge to this graph node (i.e. + the "fromServer" attribute). If it does then we add an + edge from the server unless we are over the max edges for this + graph node + + :param dsa: dsa with a dnstr equivalent to his graph node + """ + for connect in dsa.connect_table.values(): + self.add_edge_from(connect.from_dnstr) + + def add_connections_from_edges(self, dsa, transport): + """For each edge directed to this graph node, ensure there + is a corresponding nTDSConnection object in the dsa. + """ + for edge_dnstr in self.edge_from: + connections = dsa.get_connection_by_from_dnstr(edge_dnstr) + + # For each edge directed to the NC replica that + # "should be present" on the local DC, the KCC determines + # whether an object c exists such that: + # + # c is a child of the DC's nTDSDSA object. + # c.objectCategory = nTDSConnection + # + # Given the NC replica ri from which the edge is directed, + # c.fromServer is the dsname of the nTDSDSA object of + # the DC on which ri "is present". 
+ # + # c.options does not contain NTDSCONN_OPT_RODC_TOPOLOGY + + found_valid = False + for connect in connections: + if connect.is_rodc_topology(): + continue + found_valid = True + + if found_valid: + continue + + # if no such object exists then the KCC adds an object + # c with the following attributes + + # Generate a new dnstr for this nTDSConnection + opt = dsdb.NTDSCONN_OPT_IS_GENERATED + flags = (dsdb.SYSTEM_FLAG_CONFIG_ALLOW_RENAME | + dsdb.SYSTEM_FLAG_CONFIG_ALLOW_MOVE) + + dsa.new_connection(opt, flags, transport, edge_dnstr, None) + + def has_sufficient_edges(self): + """Return True if we have met the maximum "from edges" criteria""" + if len(self.edge_from) >= self.max_edges: + return True + return False + + +class Transport(object): + """Class defines a Inter-site transport found under Sites + """ + + def __init__(self, dnstr): + self.dnstr = dnstr + self.options = 0 + self.guid = None + self.name = None + self.address_attr = None + self.bridgehead_list = [] + + def __str__(self): + """Debug dump string output of Transport object""" + + text = "%s:\n\tdn=%s" % (self.__class__.__name__, self.dnstr) +\ + "\n\tguid=%s" % str(self.guid) +\ + "\n\toptions=%d" % self.options +\ + "\n\taddress_attr=%s" % self.address_attr +\ + "\n\tname=%s" % self.name +\ + "".join("\n\tbridgehead_list=%s" % dnstr for dnstr in self.bridgehead_list) + + return text + + def load_transport(self, samdb): + """Given a Transport object with an prior initialization + for the object's DN, search for the DN and load attributes + from the samdb. + """ + attrs = ["objectGUID", + "options", + "name", + "bridgeheadServerListBL", + "transportAddressAttribute"] + try: + res = samdb.search(base=self.dnstr, scope=ldb.SCOPE_BASE, + attrs=attrs) + + except ldb.LdbError as e18: + (enum, estr) = e18.args + raise KCCError("Unable to find Transport for (%s) - (%s)" % + (self.dnstr, estr)) + + msg = res[0] + self.guid = misc.GUID(samdb.schema_format_value("objectGUID", + msg["objectGUID"][0])) + + if "options" in msg: + self.options = int(msg["options"][0]) + + if "transportAddressAttribute" in msg: + self.address_attr = str(msg["transportAddressAttribute"][0]) + + if "name" in msg: + self.name = str(msg["name"][0]) + + if "bridgeheadServerListBL" in msg: + for value in msg["bridgeheadServerListBL"]: + dsdn = dsdb_Dn(samdb, value.decode('utf8')) + dnstr = str(dsdn.dn) + if dnstr not in self.bridgehead_list: + self.bridgehead_list.append(dnstr) + + +class RepsFromTo(object): + """Class encapsulation of the NDR repsFromToBlob. + + Removes the necessity of external code having to + understand about other_info or manipulation of + update flags. + """ + def __init__(self, nc_dnstr=None, ndr_blob=None): + + self.__dict__['to_be_deleted'] = False + self.__dict__['nc_dnstr'] = nc_dnstr + self.__dict__['update_flags'] = 0x0 + # XXX the following sounds dubious and/or better solved + # elsewhere, but lets leave it for now. In particular, there + # seems to be no reason for all the non-ndr generated + # attributes to be handled in the round about way (e.g. + # self.__dict__['to_be_deleted'] = False above). On the other + # hand, it all seems to work. Hooray! Hands off!. + # + # WARNING: + # + # There is a very subtle bug here with python + # and our NDR code. If you assign directly to + # a NDR produced struct (e.g. t_repsFrom.ctr.other_info) + # then a proper python GC reference count is not + # maintained. 
+ # + # To work around this we maintain an internal + # reference to "dns_name(x)" and "other_info" elements + # of repsFromToBlob. This internal reference + # is hidden within this class but it is why you + # see statements like this below: + # + # self.__dict__['ndr_blob'].ctr.other_info = \ + # self.__dict__['other_info'] = drsblobs.repsFromTo1OtherInfo() + # + # That would appear to be a redundant assignment but + # it is necessary to hold a proper python GC reference + # count. + if ndr_blob is None: + self.__dict__['ndr_blob'] = drsblobs.repsFromToBlob() + self.__dict__['ndr_blob'].version = 0x1 + self.__dict__['dns_name1'] = None + self.__dict__['dns_name2'] = None + + self.__dict__['ndr_blob'].ctr.other_info = \ + self.__dict__['other_info'] = drsblobs.repsFromTo1OtherInfo() + + else: + self.__dict__['ndr_blob'] = ndr_blob + self.__dict__['other_info'] = ndr_blob.ctr.other_info + + if ndr_blob.version == 0x1: + self.__dict__['dns_name1'] = ndr_blob.ctr.other_info.dns_name + self.__dict__['dns_name2'] = None + else: + self.__dict__['dns_name1'] = ndr_blob.ctr.other_info.dns_name1 + self.__dict__['dns_name2'] = ndr_blob.ctr.other_info.dns_name2 + + def __str__(self): + """Debug dump string output of class""" + + text = "%s:" % self.__class__.__name__ +\ + "\n\tdnstr=%s" % self.nc_dnstr +\ + "\n\tupdate_flags=0x%X" % self.update_flags +\ + "\n\tversion=%d" % self.version +\ + "\n\tsource_dsa_obj_guid=%s" % self.source_dsa_obj_guid +\ + ("\n\tsource_dsa_invocation_id=%s" % + self.source_dsa_invocation_id) +\ + "\n\ttransport_guid=%s" % self.transport_guid +\ + "\n\treplica_flags=0x%X" % self.replica_flags +\ + ("\n\tconsecutive_sync_failures=%d" % + self.consecutive_sync_failures) +\ + "\n\tlast_success=%s" % self.last_success +\ + "\n\tlast_attempt=%s" % self.last_attempt +\ + "\n\tdns_name1=%s" % self.dns_name1 +\ + "\n\tdns_name2=%s" % self.dns_name2 +\ + "\n\tschedule[ " +\ + "".join("0x%X " % slot for slot in self.schedule) +\ + "]" + + return text + + def __setattr__(self, item, value): + """Set an attribute and change update flag. + + Be aware that setting any RepsFromTo attribute will set the + drsuapi.DRSUAPI_DRS_UPDATE_ADDRESS update flag. 
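+
+        For example, "rep.replica_flags = flags" also ORs
+        drsuapi.DRSUAPI_DRS_UPDATE_FLAGS into update_flags, and
+        "rep.schedule = s" likewise ORs in DRSUAPI_DRS_UPDATE_SCHEDULE.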
+ """ + if item in ['schedule', 'replica_flags', 'transport_guid', + 'source_dsa_obj_guid', 'source_dsa_invocation_id', + 'consecutive_sync_failures', 'last_success', + 'last_attempt']: + + if item in ['replica_flags']: + self.__dict__['update_flags'] |= drsuapi.DRSUAPI_DRS_UPDATE_FLAGS + elif item in ['schedule']: + self.__dict__['update_flags'] |= drsuapi.DRSUAPI_DRS_UPDATE_SCHEDULE + + setattr(self.__dict__['ndr_blob'].ctr, item, value) + + elif item in ['dns_name1']: + self.__dict__['dns_name1'] = value + + if self.__dict__['ndr_blob'].version == 0x1: + self.__dict__['ndr_blob'].ctr.other_info.dns_name = \ + self.__dict__['dns_name1'] + else: + self.__dict__['ndr_blob'].ctr.other_info.dns_name1 = \ + self.__dict__['dns_name1'] + + elif item in ['dns_name2']: + self.__dict__['dns_name2'] = value + + if self.__dict__['ndr_blob'].version == 0x1: + raise AttributeError(item) + else: + self.__dict__['ndr_blob'].ctr.other_info.dns_name2 = \ + self.__dict__['dns_name2'] + + elif item in ['nc_dnstr']: + self.__dict__['nc_dnstr'] = value + + elif item in ['to_be_deleted']: + self.__dict__['to_be_deleted'] = value + + elif item in ['version']: + raise AttributeError("Attempt to set readonly attribute %s" % item) + else: + raise AttributeError("Unknown attribute %s" % item) + + self.__dict__['update_flags'] |= drsuapi.DRSUAPI_DRS_UPDATE_ADDRESS + + def __getattr__(self, item): + """Overload of RepsFromTo attribute retrieval. + + Allows external code to ignore substructures within the blob + """ + if item in ['schedule', 'replica_flags', 'transport_guid', + 'source_dsa_obj_guid', 'source_dsa_invocation_id', + 'consecutive_sync_failures', 'last_success', + 'last_attempt']: + return getattr(self.__dict__['ndr_blob'].ctr, item) + + elif item in ['version']: + return self.__dict__['ndr_blob'].version + + elif item in ['dns_name1']: + if self.__dict__['ndr_blob'].version == 0x1: + return self.__dict__['ndr_blob'].ctr.other_info.dns_name + else: + return self.__dict__['ndr_blob'].ctr.other_info.dns_name1 + + elif item in ['dns_name2']: + if self.__dict__['ndr_blob'].version == 0x1: + raise AttributeError(item) + else: + return self.__dict__['ndr_blob'].ctr.other_info.dns_name2 + + elif item in ['to_be_deleted']: + return self.__dict__['to_be_deleted'] + + elif item in ['nc_dnstr']: + return self.__dict__['nc_dnstr'] + + elif item in ['update_flags']: + return self.__dict__['update_flags'] + + raise AttributeError("Unknown attribute %s" % item) + + def is_modified(self): + return (self.update_flags != 0x0) + + def set_unmodified(self): + self.__dict__['update_flags'] = 0x0 + + +class SiteLink(object): + """Class defines a site link found under sites + """ + + def __init__(self, dnstr): + self.dnstr = dnstr + self.options = 0 + self.system_flags = 0 + self.cost = 0 + self.schedule = None + self.interval = None + self.site_list = [] + + def __str__(self): + """Debug dump string output of Transport object""" + + text = "%s:\n\tdn=%s" % (self.__class__.__name__, self.dnstr) +\ + "\n\toptions=%d" % self.options +\ + "\n\tsystem_flags=%d" % self.system_flags +\ + "\n\tcost=%d" % self.cost +\ + "\n\tinterval=%s" % self.interval + + if self.schedule is not None: + text += "\n\tschedule.size=%s" % self.schedule.size +\ + "\n\tschedule.bandwidth=%s" % self.schedule.bandwidth +\ + ("\n\tschedule.numberOfSchedules=%s" % + self.schedule.numberOfSchedules) + + for i, header in enumerate(self.schedule.headerArray): + text += ("\n\tschedule.headerArray[%d].type=%d" % + (i, header.type)) +\ + 
("\n\tschedule.headerArray[%d].offset=%d" % + (i, header.offset)) +\ + "\n\tschedule.dataArray[%d].slots[ " % i +\ + "".join("0x%X " % slot for slot in self.schedule.dataArray[i].slots) +\ + "]" + + for guid, dn in self.site_list: + text = text + "\n\tsite_list=%s (%s)" % (guid, dn) + return text + + def load_sitelink(self, samdb): + """Given a siteLink object with an prior initialization + for the object's DN, search for the DN and load attributes + from the samdb. + """ + attrs = ["options", + "systemFlags", + "cost", + "schedule", + "replInterval", + "siteList"] + try: + res = samdb.search(base=self.dnstr, scope=ldb.SCOPE_BASE, + attrs=attrs, controls=['extended_dn:0']) + + except ldb.LdbError as e19: + (enum, estr) = e19.args + raise KCCError("Unable to find SiteLink for (%s) - (%s)" % + (self.dnstr, estr)) + + msg = res[0] + + if "options" in msg: + self.options = int(msg["options"][0]) + + if "systemFlags" in msg: + self.system_flags = int(msg["systemFlags"][0]) + + if "cost" in msg: + self.cost = int(msg["cost"][0]) + + if "replInterval" in msg: + self.interval = int(msg["replInterval"][0]) + + if "siteList" in msg: + for value in msg["siteList"]: + dsdn = dsdb_Dn(samdb, value.decode('utf8')) + guid = misc.GUID(dsdn.dn.get_extended_component('GUID')) + dnstr = str(dsdn.dn) + if (guid, dnstr) not in self.site_list: + self.site_list.append((guid, dnstr)) + + if "schedule" in msg: + self.schedule = ndr_unpack(drsblobs.schedule, value) + else: + self.schedule = new_connection_schedule() + + +class KCCFailedObject(object): + def __init__(self, uuid, failure_count, time_first_failure, + last_result, dns_name): + self.uuid = uuid + self.failure_count = failure_count + self.time_first_failure = time_first_failure + self.last_result = last_result + self.dns_name = dns_name + + +################################################## +# Global Functions and Variables +################################################## + +def get_dsa_config_rep(dsa): + # Find configuration NC replica for the DSA + for c_rep in dsa.current_rep_table.values(): + if c_rep.is_config(): + return c_rep + + raise KCCError("Unable to find config NC replica for (%s)" % + dsa.dsa_dnstr) + + +def new_connection_schedule(): + """Create a default schedule for an NTDSConnection or Sitelink. This + is packed differently from the repltimes schedule used elsewhere + in KCC (where the 168 nibbles are packed into 84 bytes). + """ + # 168 byte instances of the 0x01 value. The low order 4 bits + # of the byte equate to 15 minute intervals within a single hour. + # There are 168 bytes because there are 168 hours in a full week + # Effectively we are saying to perform replication at the end of + # each hour of the week + schedule = drsblobs.schedule() + + schedule.size = 188 + schedule.bandwidth = 0 + schedule.numberOfSchedules = 1 + + header = drsblobs.scheduleHeader() + header.type = 0 + header.offset = 20 + + schedule.headerArray = [header] + + data = drsblobs.scheduleSlots() + data.slots = [0x01] * 168 + + schedule.dataArray = [data] + return schedule + + +################################################## +# DNS related calls +################################################## + +def uncovered_sites_to_cover(samdb, site_name): + """ + Discover which sites have no DCs and whose lowest single-hop cost + distance for any link attached to that site is linked to the site supplied. + + We compare the lowest cost of your single-hop link to this site to all of + those available (if it exists). 
This means that a lower ranked siteLink + with only the uncovered site can trump any available links (but this can + only be done with specific, poorly enacted user configuration). + + If the site is connected to more than one other site with the same + siteLink, only the largest site (failing that sorted alphabetically) + creates the DNS records. + + :param samdb database + :param site_name origin site (with a DC) + + :return a list of sites this site should be covering (for DNS) + """ + sites_to_cover = [] + + server_res = samdb.search(base=samdb.get_config_basedn(), + scope=ldb.SCOPE_SUBTREE, + expression="(&(objectClass=server)" + "(serverReference=*))") + + site_res = samdb.search(base=samdb.get_config_basedn(), + scope=ldb.SCOPE_SUBTREE, + expression="(objectClass=site)") + + sites_in_use = Counter() + dc_count = 0 + + # Assume server is of form DC,Servers,Site-ABCD because of schema + for msg in server_res: + site_dn = msg.dn.parent().parent() + sites_in_use[site_dn.canonical_str()] += 1 + + if site_dn.get_rdn_value().lower() == site_name.lower(): + dc_count += 1 + + if len(sites_in_use) != len(site_res): + # There is a possible uncovered site + sites_uncovered = [] + + for msg in site_res: + if msg.dn.canonical_str() not in sites_in_use: + sites_uncovered.append(msg) + + own_site_dn = "CN={},CN=Sites,{}".format( + ldb.binary_encode(site_name), + ldb.binary_encode(str(samdb.get_config_basedn())) + ) + + for site in sites_uncovered: + encoded_dn = ldb.binary_encode(str(site.dn)) + + # Get a sorted list of all siteLinks featuring the uncovered site + link_res1 = samdb.search(base=samdb.get_config_basedn(), + scope=ldb.SCOPE_SUBTREE, attrs=["cost"], + expression="(&(objectClass=siteLink)" + "(siteList={}))".format(encoded_dn), + controls=["server_sort:1:0:cost"]) + + # Get a sorted list of all siteLinks connecting this an the + # uncovered site + link_res2 = samdb.search(base=samdb.get_config_basedn(), + scope=ldb.SCOPE_SUBTREE, + attrs=["cost", "siteList"], + expression="(&(objectClass=siteLink)" + "(siteList={})(siteList={}))".format( + own_site_dn, + encoded_dn), + controls=["server_sort:1:0:cost"]) + + # Add to list if your link is equal in cost to lowest cost link + if len(link_res1) > 0 and len(link_res2) > 0: + cost1 = int(link_res1[0]['cost'][0]) + cost2 = int(link_res2[0]['cost'][0]) + + # Own siteLink must match the lowest cost link + if cost1 != cost2: + continue + + # In a siteLink with more than 2 sites attached, only pick the + # largest site, and if there are multiple, the earliest + # alphabetically. + to_cover = True + for site_val in link_res2[0]['siteList']: + site_dn = ldb.Dn(samdb, str(site_val)) + site_dn_str = site_dn.canonical_str() + site_rdn = site_dn.get_rdn_value().lower() + if sites_in_use[site_dn_str] > dc_count: + to_cover = False + break + elif (sites_in_use[site_dn_str] == dc_count and + site_rdn < site_name.lower()): + to_cover = False + break + + if to_cover: + site_cover_rdn = site.dn.get_rdn_value() + sites_to_cover.append(site_cover_rdn.lower()) + + return sites_to_cover diff --git a/python/samba/kcc/ldif_import_export.py b/python/samba/kcc/ldif_import_export.py new file mode 100644 index 0000000..41f0fd7 --- /dev/null +++ b/python/samba/kcc/ldif_import_export.py @@ -0,0 +1,403 @@ +# LDIF helper functions for the samba_kcc tool +# +# Copyright (C) Dave Craft 2011 +# Copyright (C) Andrew Bartlett 2015 +# +# Andrew Bartlett's alleged work performed by his underlings Douglas +# Bagnall and Garming Sam. 
+# +# This program is free software; you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation; either version 3 of the License, or +# (at your option) any later version. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with this program. If not, see . + +import os + +from samba import Ldb, ldb, read_and_sub_file +from samba.auth import system_session +from samba.samdb import SamDB, dsdb_Dn + + +class LdifError(Exception): + pass + + +def write_search_result(samdb, f, res): + for msg in res: + lstr = samdb.write_ldif(msg, ldb.CHANGETYPE_NONE) + f.write("%s" % lstr) + + +def ldif_to_samdb(dburl, lp, ldif_file, forced_local_dsa=None): + """Routine to import all objects and attributes that are relevant + to the KCC algorithms from a previously exported LDIF file. + + The point of this function is to allow a programmer/debugger to + import an LDIF file with non-security relevant information that + was previously extracted from a DC database. The LDIF file is used + to create a temporary abbreviated database. The KCC algorithm can + then run against this abbreviated database for debug or test + verification that the topology generated is computationally the + same between different OSes and algorithms. + + :param dburl: path to the temporary abbreviated db to create + :param ldif_file: path to the ldif file to import + """ + if os.path.exists(dburl): + raise LdifError("Specify a database (%s) that doesn't already exist." % + dburl) + + # Use ["modules:"] as we are attempting to build a sam + # database as opposed to start it here. + tmpdb = Ldb(url=dburl, session_info=system_session(), + lp=lp, options=["modules:"]) + + tmpdb.transaction_start() + try: + data = read_and_sub_file(ldif_file, None) + tmpdb.add_ldif(data, None) + if forced_local_dsa: + tmpdb.modify_ldif("""dn: @ROOTDSE +changetype: modify +replace: dsServiceName +dsServiceName: CN=NTDS Settings,%s + """ % forced_local_dsa) + + tmpdb.add_ldif("""dn: @MODULES +@LIST: rootdse,extended_dn_in,extended_dn_out_ldb,objectguid +- +""") + + except Exception as estr: + tmpdb.transaction_cancel() + raise LdifError("Failed to import %s: %s" % (ldif_file, estr)) + + tmpdb.transaction_commit() + + # We have an abbreviated list of options here because we have built + # an abbreviated database. We use the rootdse and extended-dn + # modules only during this re-open + samdb = SamDB(url=dburl, session_info=system_session(), lp=lp) + return samdb + + +def samdb_to_ldif_file(samdb, dburl, lp, creds, ldif_file): + """Routine to extract all objects and attributes that are relevant + to the KCC algorithms from a DC database. + + The point of this function is to allow a programmer/debugger to + extract an LDIF file with non-security relevant information from + a DC database. The LDIF file can then be used to "import" via + the import_ldif() function this file into a temporary abbreviated + database. The KCC algorithm can then run against this abbreviated + database for debug or test verification that the topology generated + is computationally the same between different OSes and algorithms. 
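+
+    For example, one might export with samdb_to_ldif_file() on a live DC,
+    copy the LDIF elsewhere, and rebuild an abbreviated database from it
+    with ldif_to_samdb() above to replay the KCC run offline.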
+ + :param dburl: LDAP database URL to extract info from + :param ldif_file: output LDIF file name to create + """ + try: + samdb = SamDB(url=dburl, + session_info=system_session(), + credentials=creds, lp=lp) + except ldb.LdbError as e: + (enum, estr) = e.args + raise LdifError("Unable to open sam database (%s) : %s" % + (dburl, estr)) + + if os.path.exists(ldif_file): + raise LdifError("Specify a file (%s) that doesn't already exist." % + ldif_file) + + try: + f = open(ldif_file, "w") + except IOError as ioerr: + raise LdifError("Unable to open (%s) : %s" % (ldif_file, str(ioerr))) + + try: + # Query Partitions + attrs = ["objectClass", + "objectGUID", + "cn", + "whenChanged", + "objectSid", + "Enabled", + "systemFlags", + "dnsRoot", + "nCName", + "msDS-NC-Replica-Locations", + "msDS-NC-RO-Replica-Locations"] + + sstr = "CN=Partitions,%s" % samdb.get_config_basedn() + res = samdb.search(base=sstr, scope=ldb.SCOPE_SUBTREE, + attrs=attrs, + expression="(objectClass=crossRef)") + + # Write partitions output + write_search_result(samdb, f, res) + + # Query cross reference container + attrs = ["objectClass", + "objectGUID", + "cn", + "whenChanged", + "fSMORoleOwner", + "systemFlags", + "msDS-Behavior-Version", + "msDS-EnabledFeature"] + + sstr = "CN=Partitions,%s" % samdb.get_config_basedn() + res = samdb.search(base=sstr, scope=ldb.SCOPE_SUBTREE, + attrs=attrs, + expression="(objectClass=crossRefContainer)") + + # Write cross reference container output + write_search_result(samdb, f, res) + + # Query Sites + attrs = ["objectClass", + "objectGUID", + "cn", + "whenChanged", + "systemFlags"] + + sstr = "CN=Sites,%s" % samdb.get_config_basedn() + sites = samdb.search(base=sstr, scope=ldb.SCOPE_SUBTREE, + attrs=attrs, + expression="(objectClass=site)") + + # Write sites output + write_search_result(samdb, f, sites) + + # Query NTDS Site Settings + for msg in sites: + sitestr = str(msg.dn) + + attrs = ["objectClass", + "objectGUID", + "cn", + "whenChanged", + "interSiteTopologyGenerator", + "interSiteTopologyFailover", + "schedule", + "options"] + + sstr = "CN=NTDS Site Settings,%s" % sitestr + res = samdb.search(base=sstr, scope=ldb.SCOPE_BASE, + attrs=attrs) + + # Write Site Settings output + write_search_result(samdb, f, res) + + # Naming context list + nclist = [] + + # Query Directory Service Agents + for msg in sites: + sstr = str(msg.dn) + + ncattrs = ["hasMasterNCs", + "msDS-hasMasterNCs", + "hasPartialReplicaNCs", + "msDS-HasDomainNCs", + "msDS-hasFullReplicaNCs", + "msDS-HasInstantiatedNCs"] + attrs = ["objectClass", + "objectGUID", + "cn", + "whenChanged", + "invocationID", + "options", + "msDS-isRODC", + "msDS-Behavior-Version"] + + res = samdb.search(base=sstr, scope=ldb.SCOPE_SUBTREE, + attrs=attrs + ncattrs, + expression="(objectClass=nTDSDSA)") + + # Spin thru all the DSAs looking for NC replicas + # and build a list of all possible Naming Contexts + # for subsequent retrieval below + for res_msg in res: + for k in res_msg.keys(): + if k in ncattrs: + for value in res_msg[k]: + # Some of these have binary DNs so + # use dsdb_Dn to split out relevant parts + dsdn = dsdb_Dn(samdb, value.decode('utf8')) + dnstr = str(dsdn.dn) + if dnstr not in nclist: + nclist.append(dnstr) + + # Write DSA output + write_search_result(samdb, f, res) + + # Query NTDS Connections + for msg in sites: + sstr = str(msg.dn) + + attrs = ["objectClass", + "objectGUID", + "cn", + "whenChanged", + "options", + "whenCreated", + "enabledConnection", + "schedule", + "transportType", + "fromServer", + 
"systemFlags"] + + res = samdb.search(base=sstr, scope=ldb.SCOPE_SUBTREE, + attrs=attrs, + expression="(objectClass=nTDSConnection)") + # Write NTDS Connection output + write_search_result(samdb, f, res) + + # Query Intersite transports + attrs = ["objectClass", + "objectGUID", + "cn", + "whenChanged", + "options", + "name", + "bridgeheadServerListBL", + "transportAddressAttribute"] + + sstr = "CN=Inter-Site Transports,CN=Sites,%s" % \ + samdb.get_config_basedn() + res = samdb.search(sstr, scope=ldb.SCOPE_SUBTREE, + attrs=attrs, + expression="(objectClass=interSiteTransport)") + + # Write inter-site transport output + write_search_result(samdb, f, res) + + # Query siteLink + attrs = ["objectClass", + "objectGUID", + "cn", + "whenChanged", + "systemFlags", + "options", + "schedule", + "replInterval", + "siteList", + "cost"] + + sstr = "CN=Sites,%s" % \ + samdb.get_config_basedn() + res = samdb.search(sstr, scope=ldb.SCOPE_SUBTREE, + attrs=attrs, + expression="(objectClass=siteLink)", + controls=['extended_dn:0']) + + # Write siteLink output + write_search_result(samdb, f, res) + + # Query siteLinkBridge + attrs = ["objectClass", + "objectGUID", + "cn", + "whenChanged", + "siteLinkList"] + + sstr = "CN=Sites,%s" % samdb.get_config_basedn() + res = samdb.search(sstr, scope=ldb.SCOPE_SUBTREE, + attrs=attrs, + expression="(objectClass=siteLinkBridge)") + + # Write siteLinkBridge output + write_search_result(samdb, f, res) + + # Query servers containers + # Needed for samdb.server_site_name() + attrs = ["objectClass", + "objectGUID", + "cn", + "whenChanged", + "systemFlags"] + + sstr = "CN=Sites,%s" % samdb.get_config_basedn() + res = samdb.search(sstr, scope=ldb.SCOPE_SUBTREE, + attrs=attrs, + expression="(objectClass=serversContainer)") + + # Write servers container output + write_search_result(samdb, f, res) + + # Query servers + # Needed because some transport interfaces refer back to + # attributes found in the server object. Also needed + # so extended-dn will be happy with dsServiceName in rootDSE + attrs = ["objectClass", + "objectGUID", + "cn", + "whenChanged", + "systemFlags", + "dNSHostName", + "mailAddress"] + + sstr = "CN=Sites,%s" % samdb.get_config_basedn() + res = samdb.search(sstr, scope=ldb.SCOPE_SUBTREE, + attrs=attrs, + expression="(objectClass=server)") + + # Write server output + write_search_result(samdb, f, res) + + # Query Naming Context replicas + attrs = ["objectClass", + "objectGUID", + "cn", + "whenChanged", + "objectSid", + "fSMORoleOwner", + "msDS-Behavior-Version", + "repsFrom", + "repsTo"] + + for sstr in nclist: + res = samdb.search(sstr, scope=ldb.SCOPE_BASE, + attrs=attrs) + + # Write naming context output + write_search_result(samdb, f, res) + + # Query rootDSE replicas + attrs = ["objectClass", + "objectGUID", + "cn", + "whenChanged", + "rootDomainNamingContext", + "configurationNamingContext", + "schemaNamingContext", + "defaultNamingContext", + "dsServiceName"] + + sstr = "" + res = samdb.search(sstr, scope=ldb.SCOPE_BASE, + attrs=attrs) + + # Record the rootDSE object as a dn as it + # would appear in the base ldb file. We have + # to save it this way because we are going to + # be importing as an abbreviated database. 
+ res[0].dn = ldb.Dn(samdb, "@ROOTDSE") + + # Write rootdse output + write_search_result(samdb, f, res) + + except ldb.LdbError as e1: + (enum, estr) = e1.args + raise LdifError("Error processing (%s) : %s" % (sstr, estr)) + + f.close() diff --git a/python/samba/logger.py b/python/samba/logger.py new file mode 100644 index 0000000..a35ef2a --- /dev/null +++ b/python/samba/logger.py @@ -0,0 +1,69 @@ +# Samba common functions +# +# Copyright (C) Joe Guo +# +# This program is free software; you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation; either version 3 of the License, or +# (at your option) any later version. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with this program. If not, see . +# + +import sys +import logging +from samba.colour import GREY, YELLOW, GREEN, RED, DARK_RED, C_NORMAL +from samba.colour import is_colour_wanted + +LEVEL_COLORS = { + logging.CRITICAL: DARK_RED, + logging.ERROR: RED, + logging.WARNING: YELLOW, + logging.INFO: GREEN, + logging.DEBUG: GREY, +} + + +class ColoredFormatter(logging.Formatter): + """Add color to log according to level""" + + def format(self, record): + log = super().format(record) + color = LEVEL_COLORS.get(record.levelno, GREY) + return color + log + C_NORMAL + + +def get_samba_logger( + name='samba', stream=sys.stderr, + level=None, verbose=False, quiet=False, + fmt=('%(levelname)s %(asctime)s pid:%(process)d ' + '%(pathname)s #%(lineno)d: %(message)s'), + datefmt=None): + """ + Get a logger instance and config it. + """ + logger = logging.getLogger(name) + + if not level: + # if level not specified, map options to level + level = ((verbose and logging.DEBUG) or + (quiet and logging.WARNING) or logging.INFO) + + logger.setLevel(level) + if is_colour_wanted(stream): + Formatter = ColoredFormatter + else: + Formatter = logging.Formatter + formatter = Formatter(fmt=fmt, datefmt=datefmt) + + handler = logging.StreamHandler(stream=stream) + handler.setFormatter(formatter) + logger.addHandler(handler) + + return logger diff --git a/python/samba/mdb_util.py b/python/samba/mdb_util.py new file mode 100644 index 0000000..688e066 --- /dev/null +++ b/python/samba/mdb_util.py @@ -0,0 +1,43 @@ +# Unix SMB/CIFS implementation. +# mdb util helpers +# +# Copyright (C) Andrew Bartlett 2018 +# +# This program is free software; you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation; either version 3 of the License, or +# (at your option) any later version. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with this program. If not, see . 
+
+import subprocess
+import os
+from samba.netcmd import CommandError
+
+
+def mdb_copy(file1, file2):
+    """Copy mdb file using mdb_copy utility and rename it
+    """
+    # Find the location of the mdb_copy tool
+    dirs = os.getenv('PATH').split(os.pathsep)
+    found = False
+    for d in dirs:
+        toolpath = os.path.join(d, "mdb_copy")
+        if os.path.exists(toolpath):
+            found = True
+            break
+
+    if not found:
+        raise CommandError("mdb_copy not found. "
+                           "You may need to install the lmdb-utils package")
+
+    mdb_copy_cmd = [toolpath, "-n", file1, "%s.copy.mdb" % file1]
+    status = subprocess.check_call(mdb_copy_cmd, close_fds=True, shell=False)
+
+    os.rename("%s.copy.mdb" % file1, file2)
diff --git a/python/samba/ms_display_specifiers.py b/python/samba/ms_display_specifiers.py
new file mode 100644
index 0000000..ae48dce
--- /dev/null
+++ b/python/samba/ms_display_specifiers.py
@@ -0,0 +1,195 @@
+# Create DisplaySpecifiers LDIF (as a string) from the documents provided by
+# Microsoft under the WSPP.
+#
+# Copyright (C) Andrew Kroeger 2009
+#
+# Based on ms_schema.py
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program.  If not, see <http://www.gnu.org/licenses/>.
+
+import re
+import sys
+
+
+def __read_folded_line(f, buffer):
+    """Read a line from an LDIF file, unfolding it"""
+    line = buffer
+
+    while True:
+        l = f.readline()
+
+        if l[:1] == " ":
+            # continued line
+
+            # cannot fold an empty line
+            assert(line != "" and line != "\n")
+
+            # preserves '\n '
+            line = line + l
+        else:
+            # non-continued line
+            if line == "":
+                line = l
+
+                if l == "":
+                    # eof, definitely won't be folded
+                    break
+            else:
+                # marks end of a folded line
+                # line contains the now unfolded line
+                # buffer contains the start of the next possibly folded line
+                buffer = l
+                break
+
+    return (line, buffer)
+
+
+# Only compile regexp once.
+# Will not match options after the attribute type.
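+# (Illustration: the pattern matches the "cn" in "cn: Babs Jensen", but not
+# "cn;lang-en: Babs Jensen", since an option such as ";lang-en" between the
+# attribute type and the colon prevents a match.)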
+attr_type_re = re.compile("^([A-Za-z][A-Za-z0-9-]*):")
+
+
+def __read_raw_entries(f):
+    """Read an LDIF entry, only unfolding lines"""
+
+    buffer = ""
+
+    while True:
+        entry = []
+
+        while True:
+            (l, buffer) = __read_folded_line(f, buffer)
+
+            if l[:1] == "#":
+                continue
+
+            if l == "\n" or l == "":
+                break
+
+            m = attr_type_re.match(l)
+
+            if m:
+                if l[-1:] == "\n":
+                    l = l[:-1]
+
+                entry.append(l)
+            else:
+                print("Invalid line: %s" % l, end=' ', file=sys.stderr)
+                sys.exit(1)
+
+        if len(entry):
+            yield entry
+
+        if l == "":
+            break
+
+
+def fix_dn(dn):
+    """Fix a string DN to use ${CONFIGDN}"""
+
+    if dn.find("<Configuration NC Distinguished Name>") != -1:
+        dn = dn.replace("\n ", "")
+        return dn.replace("<Configuration NC Distinguished Name>",
+                          "${CONFIGDN}")
+    else:
+        return dn
+
+
+def __write_ldif_one(entry):
+    """Write out entry as LDIF"""
+    out = []
+
+    for l in entry:
+        if l[2] == 0:
+            out.append("%s: %s" % (l[0], l[1]))
+        else:
+            # This is a base64-encoded value
+            out.append("%s:: %s" % (l[0], l[1]))
+
+    return "\n".join(out)
+
+
+def __transform_entry(entry):
+    """Perform required transformations to the Microsoft-provided LDIF"""
+
+    temp_entry = []
+
+    for l in entry:
+        t = []
+
+        if l.find("::") != -1:
+            # This is a base64-encoded value
+            t = l.split(":: ", 1)
+            t.append(1)
+        else:
+            t = l.split(": ", 1)
+            t.append(0)
+
+        key = t[0].lower()
+
+        if key == "changetype":
+            continue
+
+        if key == "distinguishedname":
+            continue
+
+        if key == "instancetype":
+            continue
+
+        if key == "name":
+            continue
+
+        if key == "cn":
+            continue
+
+        if key == "objectcategory":
+            continue
+
+        if key == "showinadvancedviewonly":
+            value = t[1].upper().lstrip().rstrip()
+            if value == "TRUE":
+                # Remove showInAdvancedViewOnly attribute if it is set to the
+                # default value of TRUE
+                continue
+
+        t[1] = fix_dn(t[1])
+
+        temp_entry.append(t)
+
+    entry = temp_entry
+
+    return entry
+
+
+def read_ms_ldif(filename):
+    """Read and transform Microsoft-provided LDIF file."""
+
+    out = []
+
+    from io import open
+    with open(filename, "r", encoding='latin-1') as f:
+        for entry in __read_raw_entries(f):
+            out.append(__write_ldif_one(__transform_entry(entry)))
+
+    return "\n\n".join(out) + "\n\n"
+
+
+if __name__ == '__main__':
+    import sys
+
+    try:
+        display_specifiers_file = sys.argv[1]
+    except IndexError:
+        print("Usage: %s display-specifiers-ldif-file.txt" % (sys.argv[0]), file=sys.stderr)
+        sys.exit(1)
+
+    print(read_ms_ldif(display_specifiers_file))
diff --git a/python/samba/ms_forest_updates_markdown.py b/python/samba/ms_forest_updates_markdown.py
new file mode 100644
index 0000000..0a0d211
--- /dev/null
+++ b/python/samba/ms_forest_updates_markdown.py
@@ -0,0 +1,309 @@
+# Create forest updates ldif from Github markdown
+#
+# Each update is converted to an ldif then gets written to a corresponding
+# .LDF output file or stored in a dictionary.
+#
+# Only add updates can generally be applied.
+#
+# Copyright (C) Andrew Bartlett 2017
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program.  If not, see <http://www.gnu.org/licenses/>.
+ +"""Generate LDIF from Github documentation.""" + +import re +import os +import markdown +import xml.etree.ElementTree as ET +from samba.common import get_string + + +# Display specifier updates or otherwise (ignored in forest_update.py) +def noop(description, attributes, sd): + return (None, None, [], None) + + +# ACE addition updates (ignored in forest_update.py) +def parse_grant(description, attributes, sd): + # Granting the "CN=Send-As,CN=Extended-Rights" to gMSA accounts. + if (description.startswith("Granting the ") and + description.endswith("to gMSA accounts.") and + (attributes and attributes.lower() == 'n/a') and + (sd and sd.lower() == 'n/a')): + return ('modify', extract_dn_or_none(description), + ['add: appliesTo', 'appliesTo: 7b8b558a-93a5-4af7-adca-c017e67f1057'], + None) + + return ('modify', None, [], sd if sd.lower() != 'n/a' else None) + + +# Addition of new objects to the directory (most are applied in forest_update.py) +def parse_add(description, attributes, sd): + dn = extract_dn(description) + return ('add', dn, extract_attrib(dn, attributes), sd if sd.lower() != 'n/a' else None) + + +# Set of a particular attribute (ignored in forest_update.py) +def parse_set(description, attributes, sd): + return ('modify', extract_dn_or_none(description), + extract_replace_attrib(attributes), + sd if sd.lower() != 'n/a' else None) + + +# Set of a particular ACE (ignored in forest_update.py) +# The general issue is that the list of DNs must be generated dynamically +def parse_ace(description, attributes, sd): + + def extract_dn_ace(text): + if 'Sam-Domain' in text: + return ('${DOMAIN_DN}', 'CN=Sam-Domain,${SCHEMA_DN}') + elif 'Domain-DNS' in text: + return ('${...}', 'CN=Domain-DNS,${SCHEMA_DN}') + + return None + + return [('modify', extract_dn_ace(description)[0], + ['replace: nTSecurityDescriptor', + 'nTSecurityDescriptor: ${DOMAIN_SCHEMA_SD}%s' % sd], None), + ('modify', extract_dn_ace(description)[1], + ['replace: defaultSecurityDescriptor', + 'defaultSecurityDescriptor: ${OLD_SAMBA_SD}%s' % sd], None)] + + +# We are really only interested in 'Created' items +operation_map = { + # modify + 'Granting': parse_grant, + # add + 'Created': parse_add, + # modify + 'Set': parse_set, + # modify + 'Added ACE': parse_ace, + # modify + 'Updated': parse_set, + # unknown + 'Call': noop +} + + +def extract_dn(text): + """ + Extract a DN from the textual description + :param text: + :return: DN in string form + """ + text = text.replace(' in the Schema partition.', ',${SCHEMA_DN}') + text = text.replace(' in the Configuration partition.', ',${CONFIG_DN}') + dn = re.search('([CDO][NCU]=.*?,)*([CDO][NCU]=.*)', text).group(0) + + # This should probably be also fixed upstream + if dn == 'CN=ad://ext/AuthenticationSilo,CN=Claim Types,CN=Claims Configuration,CN=Services': + return 'CN=ad://ext/AuthenticationSilo,CN=Claim Types,CN=Claims Configuration,CN=Services,${CONFIG_DN}' + + # Granting the "CN=Send-As,CN=Extended-Rights" to gMSA accounts. 
+ if dn.endswith(',CN=Extended-Rights" to gMSA accounts.'): + dn = dn.replace('" to gMSA accounts.', '') + return dn + ",${CONFIG_DN}" + + return dn + + +def extract_dn_or_none(text): + """ + Same as above, but returns None if it doesn't work + :param text: + :return: DN or None + """ + try: + return extract_dn(text) + except: + return None + + +def save_ldif(filename, answers, out_folder): + """ + Save ldif to disk for each updates + :param filename: filename use ([OPERATION NUM]-{GUID}.ldif) + :param answers: array of tuples generated with earlier functions + :param out_folder: folder to prepend + """ + path = os.path.join(out_folder, filename) + with open(path, 'w') as ldif: + for answer in answers: + change, dn, attrib, sd = answer + ldif.write('dn: %s\n' % dn) + ldif.write('changetype: %s\n' % change) + if len(attrib) > 0: + ldif.write('\n'.join(attrib) + '\n') + if sd is not None: + ldif.write('nTSecurityDescriptor: D:%s\n' % sd) + ldif.write('-\n\n') + + +def save_array(guid, answers, out_dict): + """ + Save ldif to an output dictionary + :param guid: GUID to store + :param answers: array of tuples generated with earlier functions + :param out_dict: output dictionary + """ + ldif = '' + for answer in answers: + change, dn, attrib, sd = answer + ldif += 'dn: %s\n' % dn + ldif += 'changetype: %s\n' % change + if len(attrib) > 0: + ldif += '\n'.join(attrib) + '\n' + if sd is not None: + ldif += 'nTSecurityDescriptor: D:%s\n' % sd + ldif += '-\n\n' + + out_dict[guid] = ldif + + +def extract_attrib(dn, attributes): + """ + Extract the attributes as an array from the attributes column + :param dn: parsed from markdown + :param attributes: from markdown + :return: attribute array (ldif-type format) + """ + attrib = [x.lstrip('- ') for x in attributes.split('- ') if x.lower() != 'n/a' and x != ''] + attrib = [x.replace(': True', ': TRUE') if x.endswith(': True') else x for x in attrib] + attrib = [x.replace(': False', ': FALSE') if x.endswith(': False') else x for x in attrib] + # We only have one such value, we may as well skip them all consistently + attrib = [x for x in attrib if not x.lower().startswith('msds-claimpossiblevalues')] + + return attrib + + +def extract_replace_attrib(attributes): + """ + Extract the attributes as an array from the attributes column + (for replace) + :param attributes: from markdown + :return: attribute array (ldif-type format) + """ + lines = [x.lstrip('- ') for x in attributes.split('- ') if x.lower() != 'n/a' and x != ''] + lines = [('replace: %s' % line.split(':')[0], line) for line in lines] + lines = [line for pair in lines for line in pair] + return lines + + +def innertext(tag): + return (tag.text or '') + \ + ''.join(innertext(e) for e in tag) + \ + (tag.tail or '') + + +def read_ms_markdown(in_file, out_folder=None, out_dict=None): + """ + Read Github documentation to produce forest wide updates + :param in_file: Forest-Wide-Updates.md + :param out_folder: output folder + :param out_dict: output dictionary + """ + + with open(in_file) as update_file: + # There is a hidden ClaimPossibleValues in this md file + content = update_file.read() + + content = re.sub(r'

', + '
', + content) + content = re.sub(r'CN=\\', + '${FOREST_ROOT_DOMAIN}', + content) + + html = markdown.markdown(content, + output_format='xhtml') + + html = html.replace('CN=Schema,%ws', '${SCHEMA_DN}') + + tree = ET.fromstring('' + html + '') + + for node in tree: + if not node.text: + continue + updates = None + if node.text.startswith('|Operation'): + # Strip first and last | + updates = [x[1:len(x) - 1].split('|') for x in + get_string(ET.tostring(node, method='text')).splitlines()] + elif node.text.startswith('| Operation'): + # Strip first and last | + updates = [x[2:len(x) - 2].split(' | ') for x in + get_string(ET.tostring(node, method='text')).splitlines()] + if updates: + for update in updates[2:]: + output = re.match(r'Operation (\d+): {(.*)}', update[0]) + if output: + # print output.group(1), output.group(2) + guid = output.group(2) + filename = "%s-{%s}.ldif" % (output.group(1).zfill(4), guid) + + found = False + + if update[3].startswith('Created') or update[1].startswith('Added ACE'): + # Trigger the security descriptor code + # Reduce info to just the security descriptor + update[3] = update[3].split(':')[-1] + + result = parse_ace(update[1], update[2], update[3]) + + if filename and out_folder is not None: + save_ldif(filename, result, out_folder) + else: + save_array(guid, result, out_dict) + + continue + + for operation in operation_map: + if update[1].startswith(operation): + found = True + + result = operation_map[operation](update[1], update[2], update[3]) + + if filename and out_folder is not None: + save_ldif(filename, [result], out_folder) + else: + save_array(guid, [result], out_dict) + + break + + if not found: + raise Exception(update) + + # print ET.tostring(node, method='text') + + +if __name__ == '__main__': + import sys + + out_folder = '' + + if len(sys.argv) == 0: + print("Usage: %s []" % (sys.argv[0]), file=sys.stderr) + sys.exit(1) + + in_file = sys.argv[1] + if len(sys.argv) > 2: + out_folder = sys.argv[2] + + read_ms_markdown(in_file, out_folder) diff --git a/python/samba/ms_schema.py b/python/samba/ms_schema.py new file mode 100644 index 0000000..986ae3d --- /dev/null +++ b/python/samba/ms_schema.py @@ -0,0 +1,337 @@ +# create schema.ldif (as a string) from WSPP documentation +# +# based on minschema.py and minschema_wspp +# +# This program is free software; you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation; either version 3 of the License, or +# (at your option) any later version. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with this program. If not, see . 
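+#
+# Rough usage sketch (the two input file names are illustrative):
+#
+#   from samba.ms_schema import read_ms_schema
+#   ldif = read_ms_schema('attributes.txt', 'classes.txt')
+#   # 'ldif' holds the attributeSchema entries followed by the
+#   # classSchema entries, with DNs rewritten to use ${SCHEMADN}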
+ +"""Generate LDIF from WSPP documentation.""" + +import re +import base64 +import uuid + +bitFields = {} + +# ADTS: 2.2.9 +# bit positions as labeled in the docs +bitFields["searchflags"] = { + 'fATTINDEX': 31, # IX + 'fPDNTATTINDEX': 30, # PI + 'fANR': 29, # AR + 'fPRESERVEONDELETE': 28, # PR + 'fCOPY': 27, # CP + 'fTUPLEINDEX': 26, # TP + 'fSUBTREEATTINDEX': 25, # ST + 'fCONFIDENTIAL': 24, # CF + 'fCONFIDENTAIL': 24, # typo + 'fNEVERVALUEAUDIT': 23, # NV + 'fRODCAttribute': 22, # RO + + + # missing in ADTS but required by LDIF + 'fRODCFilteredAttribute': 22, # RO + 'fRODCFILTEREDATTRIBUTE': 22, # case + 'fEXTENDEDLINKTRACKING': 21, # XL + 'fBASEONLY': 20, # BO + 'fPARTITIONSECRET': 19, # SE +} + +# ADTS: 2.2.10 +bitFields["systemflags"] = { + 'FLAG_ATTR_NOT_REPLICATED': 31, 'FLAG_CR_NTDS_NC': 31, # NR + 'FLAG_ATTR_REQ_PARTIAL_SET_MEMBER': 30, 'FLAG_CR_NTDS_DOMAIN': 30, # PS + 'FLAG_ATTR_IS_CONSTRUCTED': 29, 'FLAG_CR_NTDS_NOT_GC_REPLICATED': 29, # CS + 'FLAG_ATTR_IS_OPERATIONAL': 28, # OP + 'FLAG_SCHEMA_BASE_OBJECT': 27, # BS + 'FLAG_ATTR_IS_RDN': 26, # RD + 'FLAG_DISALLOW_MOVE_ON_DELETE': 6, # DE + 'FLAG_DOMAIN_DISALLOW_MOVE': 5, # DM + 'FLAG_DOMAIN_DISALLOW_RENAME': 4, # DR + 'FLAG_CONFIG_ALLOW_LIMITED_MOVE': 3, # AL + 'FLAG_CONFIG_ALLOW_MOVE': 2, # AM + 'FLAG_CONFIG_ALLOW_RENAME': 1, # AR + 'FLAG_DISALLOW_DELETE': 0 # DD +} + +# ADTS: 2.2.11 +bitFields["schemaflagsex"] = { + 'FLAG_ATTR_IS_CRITICAL': 31 +} + +# ADTS: 3.1.1.2.2.2 +oMObjectClassBER = { + '1.3.12.2.1011.28.0.702': base64.b64encode(b'\x2B\x0C\x02\x87\x73\x1C\x00\x85\x3E').decode('utf8'), + '1.2.840.113556.1.1.1.12': base64.b64encode(b'\x2A\x86\x48\x86\xF7\x14\x01\x01\x01\x0C').decode('utf8'), + '2.6.6.1.2.5.11.29': base64.b64encode(b'\x56\x06\x01\x02\x05\x0B\x1D').decode('utf8'), + '1.2.840.113556.1.1.1.11': base64.b64encode(b'\x2A\x86\x48\x86\xF7\x14\x01\x01\x01\x0B').decode('utf8'), + '1.3.12.2.1011.28.0.714': base64.b64encode(b'\x2B\x0C\x02\x87\x73\x1C\x00\x85\x4A').decode('utf8'), + '1.3.12.2.1011.28.0.732': base64.b64encode(b'\x2B\x0C\x02\x87\x73\x1C\x00\x85\x5C').decode('utf8'), + '1.2.840.113556.1.1.1.6': base64.b64encode(b'\x2A\x86\x48\x86\xF7\x14\x01\x01\x01\x06').decode('utf8') +} + +# separated by commas in docs, and must be broken up +multivalued_attrs = set(["auxiliaryclass", "maycontain", "mustcontain", "posssuperiors", + "systemauxiliaryclass", "systemmaycontain", "systemmustcontain", + "systemposssuperiors"]) + + +def __read_folded_line(f, buffer): + """ reads a line from an LDIF file, unfolding it""" + line = buffer + + attr_type_re = re.compile("^([A-Za-z][A-Za-z0-9-]*[A-Za-z0-9])::?") + + while True: + l = f.readline() + + if l[:1] == " ": + # continued line + + # cannot fold an empty line + assert(line != "" and line != "\n") + + # preserves '\n ' + line = line + l + else: + # non-continued line + if line == "": + line = l + + if l == "": + # eof, definitely won't be folded + break + else: + if l[:1] != "#" and l != "\n" and l != "": + m = attr_type_re.match(l) + if not m: + line = line + " " + l + continue + + # marks end of a folded line + # line contains the now unfolded line + # buffer contains the start of the next possibly folded line + buffer = l + break + + return (line, buffer) + + +def __read_raw_entries(f): + """reads an LDIF entry, only unfolding lines""" + import sys + + # will not match options after the attribute type + # attributes in the schema definition have at least two chars + attr_type_re = re.compile("^([A-Za-z][A-Za-z0-9-]*[A-Za-z0-9])::?") + + buffer = "" + + while True: + 
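+        # Each pass through this outer loop yields one complete entry;
+        # the inner loop below accumulates unfolded lines until a blank
+        # line or EOF terminates the entry.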
entry = [] + + while True: + (l, buffer) = __read_folded_line(f, buffer) + + if l[:1] == "#": + continue + + if l == "\n" or l == "": + break + + m = attr_type_re.match(l) + + if m: + if l[-1:] == "\n": + l = l[:-1] + + entry.append(l) + else: + print("Invalid line: %s" % l, end=' ', file=sys.stderr) + sys.exit(1) + + if len(entry): + yield entry + + if l == "": + break + + +def fix_dn(dn): + """fix a string DN to use ${SCHEMADN}""" + + # folding? + if dn.find("") != -1: + dn = dn.replace("\n ", "") + dn = dn.replace(" ", "") + return dn.replace("CN=Schema,CN=Configuration,", "${SCHEMADN}") + elif dn.endswith("DC=X"): + return dn.replace("CN=Schema,CN=Configuration,DC=X", "${SCHEMADN}") + elif dn.endswith("CN=X"): + return dn.replace("CN=Schema,CN=Configuration,CN=X", "${SCHEMADN}") + else: + return dn + + +def __convert_bitfield(key, value): + """Evaluate the OR expression in 'value'""" + assert(isinstance(value, str)) + + value = value.replace("\n ", "") + value = value.replace(" ", "") + + try: + # some attributes already have numeric values + o = int(value) + except ValueError: + o = 0 + flags = value.split("|") + for f in flags: + bitpos = bitFields[key][f] + o = o | (1 << (31 - bitpos)) + + return str(o) + + +def __write_ldif_one(entry): + """Write out entry as LDIF""" + out = [] + + for l in entry: + if isinstance(l[1], str): + vl = [l[1]] + else: + vl = l[1] + + if l[2]: + out.append("%s:: %s" % (l[0], l[1])) + continue + + for v in vl: + out.append("%s: %s" % (l[0], v)) + + return "\n".join(out) + + +def __transform_entry(entry, objectClass): + """Perform transformations required to convert the LDIF-like schema + file entries to LDIF, including Samba-specific stuff.""" + + entry = [l.split(":", 1) for l in entry] + + cn = "" + skip_dn = skip_objectclass = skip_admin_description = skip_admin_display_name = False + + for l in entry: + if l[1].startswith(': '): + l.append(True) + l[1] = l[1][2:] + else: + l.append(False) + + key = l[0].lower() + l[1] = l[1].lstrip() + l[1] = l[1].rstrip() + + if not cn and key == "cn": + cn = l[1] + + if key in multivalued_attrs: + # unlike LDIF, these are comma-separated + l[1] = l[1].replace("\n ", "") + l[1] = l[1].replace(" ", "") + + l[1] = l[1].split(",") + + if key in bitFields: + l[1] = __convert_bitfield(key, l[1]) + + if key == "omobjectclass": + if not l[2]: + l[1] = oMObjectClassBER[l[1].strip()] + l[2] = True + + if isinstance(l[1], str): + l[1] = fix_dn(l[1]) + + if key == 'dn': + skip_dn = True + dn = l[1] + + if key == 'objectclass': + skip_objectclass = True + elif key == 'admindisplayname': + skip_admin_display_name = True + elif key == 'admindescription': + skip_admin_description = True + + assert(cn) + + header = [] + if not skip_dn: + header.append(["dn", "CN=%s,${SCHEMADN}" % cn, False]) + else: + header.append(["dn", dn, False]) + + if not skip_objectclass: + header.append(["objectClass", ["top", objectClass], False]) + if not skip_admin_description: + header.append(["adminDescription", cn, False]) + if not skip_admin_display_name: + header.append(["adminDisplayName", cn, False]) + + header.append(["objectGUID", str(uuid.uuid4()), False]) + + entry = header + [x for x in entry if x[0].lower() not in set(['dn', 'changetype', 'objectcategory'])] + + return entry + + +def __parse_schema_file(filename, objectClass): + """Load and transform a schema file.""" + + out = [] + + from io import open + with open(filename, "r", encoding='latin-1') as f: + for entry in __read_raw_entries(f): + 
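+            # Transform each raw entry (evaluate bit-field expressions,
+            # rewrite DNs to ${SCHEMADN}, prepend the standard header
+            # attributes) and serialize it back to LDIF text.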
out.append(__write_ldif_one(__transform_entry(entry, objectClass))) + + return "\n\n".join(out) + + +def read_ms_schema(attr_file, classes_file, dump_attributes=True, dump_classes=True, debug=False): + """Read WSPP documentation-derived schema files.""" + + attr_ldif = "" + classes_ldif = "" + + if dump_attributes: + attr_ldif = __parse_schema_file(attr_file, "attributeSchema") + if dump_classes: + classes_ldif = __parse_schema_file(classes_file, "classSchema") + + return attr_ldif + "\n\n" + classes_ldif + "\n\n" + + +if __name__ == '__main__': + import sys + + try: + attr_file = sys.argv[1] + classes_file = sys.argv[2] + except IndexError: + print("Usage: %s attr-file.txt classes-file.txt" % (sys.argv[0]), file=sys.stderr) + sys.exit(1) + + print(read_ms_schema(attr_file, classes_file)) diff --git a/python/samba/ms_schema_markdown.py b/python/samba/ms_schema_markdown.py new file mode 100644 index 0000000..8a9ad78 --- /dev/null +++ b/python/samba/ms_schema_markdown.py @@ -0,0 +1,78 @@ +# Create schema.ldif from Github markdown +# +# Each LDF section in the markdown file then gets written to a corresponding +# .LDF output file. +# +# Copyright (C) Andrew Bartlett 2017 +# +# This program is free software; you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation; either version 3 of the License, or +# (at your option) any later version. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with this program. If not, see . + +"""Generate LDIF from Github documentation.""" + +import re +import os +import markdown +import xml.etree.ElementTree as ET + + +def innertext(tag): + return (tag.text or '') + \ + ''.join(innertext(e) for e in tag) + \ + (tag.tail or '') + + +def read_ms_markdown(in_file, out_folder): + """Read Github documentation-derived schema files.""" + + with open(in_file) as update_file: + # Remove any comments from the raw LDF files + html = markdown.markdown(re.sub(r'(?m)^# .*\n?', '', update_file.read()), + output_format='xhtml') + + tree = ET.fromstring('' + html + '') + + ldf = None + try: + for node in tree: + if node.tag == 'h3': + if ldf is not None: + ldf.close() + + out_path = os.path.join(out_folder, innertext(node).strip()) + ldf = open(out_path, 'w') + elif node.tag == 'h2': + if ldf is not None: + ldf.close() + ldf = None + elif node.tag == 'p' and ldf is not None: + ldf.write(innertext(node).replace('```', '') + '\n') + finally: + if ldf is not None: + ldf.close() + + +if __name__ == '__main__': + import sys + + out_folder = '' + + if len(sys.argv) == 0: + print("Usage: %s []" % (sys.argv[0]), file=sys.stderr) + sys.exit(1) + + in_file = sys.argv[1] + if len(sys.argv) > 2: + out_folder = sys.argv[2] + + read_ms_markdown(in_file, out_folder) diff --git a/python/samba/ndr.py b/python/samba/ndr.py new file mode 100644 index 0000000..4207ee2 --- /dev/null +++ b/python/samba/ndr.py @@ -0,0 +1,153 @@ +# -*- coding: utf-8 -*- + +# Unix SMB/CIFS implementation. 
+# Copyright © Jelmer Vernooij 2008 +# +# This program is free software; you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation; either version 3 of the License, or +# (at your option) any later version. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with this program. If not, see . +# + + +"""Network Data Representation (NDR) marshalling and unmarshalling.""" + + +def ndr_pack(object): + """Pack a NDR object. + + :param object: Object to pack + :return: String object with marshalled object. + """ + ndr_pack = getattr(object, "__ndr_pack__", None) + if ndr_pack is None: + raise TypeError("%r is not a NDR object" % object) + return ndr_pack() + + +def ndr_unpack(cls, data, allow_remaining=False): + """NDR unpack an object. + + :param cls: Class of the object to unpack + :param data: Buffer to unpack + :param allow_remaining: allows remaining data at the end (default=False) + :return: Unpacked object + """ + object = cls() + ndr_unpack = getattr(object, "__ndr_unpack__", None) + if ndr_unpack is None: + raise TypeError("%r is not a NDR object" % object) + ndr_unpack(data, allow_remaining=allow_remaining) + return object + + +def ndr_print(object): + ndr_print = getattr(object, "__ndr_print__", None) + if ndr_print is None: + raise TypeError(f"{object} is not a NDR object") + return ndr_print() + + +def ndr_deepcopy(object): + """Create a deep copy of a NDR object, using pack/unpack + + :param object: Object to copy + :return: The object copy + """ + ndr_pack = getattr(object, "__ndr_pack__", None) + if ndr_pack is None: + raise TypeError("%r is not a NDR object" % object) + data = ndr_pack() + cls = type(object) + copy = cls() + ndr_unpack = getattr(copy, "__ndr_unpack__", None) + if ndr_unpack is None: + raise TypeError("%r is not a NDR object" % copy) + ndr_unpack(data, allow_remaining=False) + return copy + + +def ndr_pack_in(object, bigendian=False, ndr64=False): + """Pack the input of an NDR function object. + + :param object: Object to pack + :param bigendian: use LIBNDR_FLAG_BIGENDIAN (default=False) + :param ndr64: use LIBNDR_FLAG_NDR64 (default=False) + :return: String object with marshalled object. + """ + ndr_pack_in_fn = getattr(object, "__ndr_pack_in__", None) + if ndr_pack_in_fn is None: + raise TypeError("%r is not a NDR function object" % object) + return ndr_pack_in_fn(bigendian=bigendian, ndr64=ndr64) + + +def ndr_unpack_in(object, data, bigendian=False, ndr64=False, allow_remaining=False): + """Unpack the input of an NDR function object. 
+ + :param cls: Class of the object to unpack + :param data: Buffer to unpack + :param bigendian: use LIBNDR_FLAG_BIGENDIAN (default=False) + :param ndr64: use LIBNDR_FLAG_NDR64 (default=False) + :param allow_remaining: allows remaining data at the end (default=False) + :return: Unpacked object + """ + ndr_unpack_in_fn = getattr(object, "__ndr_unpack_in__", None) + if ndr_unpack_in_fn is None: + raise TypeError("%r is not a NDR function object" % object) + ndr_unpack_in_fn(data, bigendian=bigendian, ndr64=ndr64, + allow_remaining=allow_remaining) + return object + + +def ndr_print_in(object): + ndr_print_in_fn = getattr(object, "__ndr_print_in__", None) + if ndr_print_in_fn is None: + raise TypeError("%r is not a NDR function object" % object) + return ndr_print_in_fn() + + +def ndr_pack_out(object, bigendian=False, ndr64=False): + """Pack the output of an NDR function object. + + :param object: Object to pack + :param bigendian: use LIBNDR_FLAG_BIGENDIAN (default=False) + :param ndr64: use LIBNDR_FLAG_NDR64 (default=False) + :return: String object with marshalled object. + """ + ndr_pack_out_fn = getattr(object, "__ndr_pack_out__", None) + if ndr_pack_out_fn is None: + raise TypeError("%r is not a NDR function object" % object) + return ndr_pack_out_fn(bigendian=bigendian, ndr64=ndr64) + + +def ndr_unpack_out(object, data, bigendian=False, ndr64=False, allow_remaining=False): + """Unpack the output of an NDR function object. + + :param cls: Class of the object to unpack + :param data: Buffer to unpack + :param bigendian: use LIBNDR_FLAG_BIGENDIAN (default=False) + :param ndr64: use LIBNDR_FLAG_NDR64 (default=False) + :param allow_remaining: allows remaining data at the end (default=False) + :return: Unpacked object + """ + ndr_unpack_out_fn = getattr(object, "__ndr_unpack_out__", None) + if ndr_unpack_out_fn is None: + raise TypeError("%r is not a NDR function object" % object) + ndr_unpack_out_fn(data, bigendian=bigendian, ndr64=ndr64, + allow_remaining=allow_remaining) + return object + + +def ndr_print_out(object): + ndr_print_out_fn = getattr(object, "__ndr_print_out__", None) + if ndr_print_out_fn is None: + raise TypeError("%r is not a NDR function object" % object) + return ndr_print_out_fn() diff --git a/python/samba/netcmd/__init__.py b/python/samba/netcmd/__init__.py new file mode 100644 index 0000000..7ddc1dc --- /dev/null +++ b/python/samba/netcmd/__init__.py @@ -0,0 +1,396 @@ +# Unix SMB/CIFS implementation. +# Copyright (C) Jelmer Vernooij 2009-2012 +# Copyright (C) Theresa Halloran 2011 +# +# This program is free software; you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation; either version 3 of the License, or +# (at your option) any later version. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with this program. If not, see . 
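+#
+# A minimal leaf command, for orientation (the class, synopsis and
+# argument names are illustrative; real commands live in the modules
+# under samba.netcmd):
+#
+#   class cmd_hello(Command):
+#       """Say hello."""
+#       synopsis = "%prog [name] [options]"
+#       takes_args = ["name?"]
+#
+#       def run(self, name=None, **kwargs):
+#           self.message("hello %s" % (name or "world"))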
+# + +import json +import optparse +import sys +import textwrap +import traceback + +import samba +from ldb import ERR_INVALID_CREDENTIALS, LdbError +from samba import colour +from samba.auth import system_session +from samba.getopt import Option, OptionParser +from samba.logger import get_samba_logger +from samba.samdb import SamDB +from samba.dcerpc.security import SDDLValueError + +from .encoders import JSONEncoder + + +class PlainHelpFormatter(optparse.IndentedHelpFormatter): + """This help formatter does text wrapping and preserves newlines.""" + + def format_description(self, description=""): + desc_width = self.width - self.current_indent + indent = " " * self.current_indent + paragraphs = description.split('\n') + wrapped_paragraphs = [ + textwrap.fill(p, + desc_width, + initial_indent=indent, + subsequent_indent=indent) + for p in paragraphs] + result = "\n".join(wrapped_paragraphs) + "\n" + return result + + def format_epilog(self, epilog): + if epilog: + return "\n" + epilog + "\n" + else: + return "" + + +class Command(object): + """A samba-tool command.""" + + def _get_short_description(self): + return self.__doc__.splitlines()[0].rstrip("\n") + + short_description = property(_get_short_description) + + def _get_full_description(self): + lines = self.__doc__.split("\n") + return lines[0] + "\n" + textwrap.dedent("\n".join(lines[1:])) + + full_description = property(_get_full_description) + + def _get_name(self): + name = self.__class__.__name__ + if name.startswith("cmd_"): + return name[4:] + return name + + name = property(_get_name) + + # synopsis must be defined in all subclasses in order to provide the + # command usage + synopsis = None + takes_args = [] + takes_options = [] + takes_optiongroups = {} + + hidden = False + use_colour = True + requested_colour = None + + raw_argv = None + raw_args = None + raw_kwargs = None + + def _set_files(self, outf=None, errf=None): + if outf is not None: + self.outf = outf + if errf is not None: + self.errf = errf + + def __init__(self, outf=sys.stdout, errf=sys.stderr): + self._set_files(outf, errf) + + def usage(self, prog=None): + parser, _ = self._create_parser(prog) + parser.print_usage() + + def _print_error(self, msg, evalue=None, klass=None): + err = colour.c_DARK_RED("ERROR") + klass = '' if klass is None else f'({klass})' + + if evalue is None: + print(f"{err}{klass}: {msg}", file=self.errf) + else: + print(f"{err}{klass}: {msg} - {evalue}", file=self.errf) + + def _print_sddl_value_error(self, e): + generic_msg, specific_msg, position, sddl = e.args + print(f"{colour.c_DARK_RED('ERROR')}: {generic_msg}\n", + file=self.errf) + print(f' {sddl}', file=self.errf) + # If the SDDL contains non-ascii characters, the byte offset + # provided by the exception won't agree with the visual offset + # because those characters will be encoded as multiple bytes. + # + # To account for this we'll attempt to measure the string + # length of the specified number of bytes. That is not quite + # the same as the visual length, because the SDDL could + # contain zero-width, full-width, or combining characters, but + # it is closer. 
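+        # For example, if one earlier character in the SDDL encodes to
+        # two UTF-8 bytes, a byte offset of 5 corresponds to a character
+        # offset of 4, and without this adjustment the caret would drift
+        # one column to the right.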
+ try: + position = len((sddl.encode()[:position]).decode()) + except ValueError: + # use the original position + pass + + print(f"{colour.c_DARK_YELLOW('^'):>{position + 2}}", file=self.errf) + print(f' {specific_msg}', file=self.errf) + + def ldb_connect(self, hostopts, sambaopts, credopts): + """Helper to connect to Ldb database using command line opts.""" + lp = sambaopts.get_loadparm() + creds = credopts.get_credentials(lp) + return SamDB(hostopts.H, credentials=creds, + session_info=system_session(lp), lp=lp) + + def print_json(self, data): + """Print json on the screen using consistent formatting and sorting. + + A custom JSONEncoder class is used to help with serializing unknown + objects such as Dn for example. + """ + json.dump(data, self.outf, cls=JSONEncoder, indent=2, sort_keys=True) + self.outf.write("\n") + + def show_command_error(self, e): + """display a command error""" + if isinstance(e, CommandError): + (etype, evalue, etraceback) = e.exception_info + inner_exception = e.inner_exception + message = e.message + force_traceback = False + else: + (etype, evalue, etraceback) = sys.exc_info() + inner_exception = e + message = "uncaught exception" + force_traceback = True + + if isinstance(e, optparse.OptParseError): + print(evalue, file=self.errf) + self.usage() + force_traceback = False + + elif isinstance(inner_exception, LdbError): + (ldb_ecode, ldb_emsg) = inner_exception.args + if ldb_ecode == ERR_INVALID_CREDENTIALS: + print("Invalid username or password", file=self.errf) + force_traceback = False + elif ldb_emsg == 'LDAP client internal error: NT_STATUS_NETWORK_UNREACHABLE': + print("Could not reach remote server", file=self.errf) + force_traceback = False + elif ldb_emsg.startswith("Unable to open tdb "): + self._print_error(message, ldb_emsg, 'ldb') + force_traceback = False + else: + self._print_error(message, ldb_emsg, 'ldb') + + elif isinstance(inner_exception, SDDLValueError): + self._print_sddl_value_error(inner_exception) + force_traceback = False + + elif isinstance(inner_exception, AssertionError): + self._print_error(message, klass='assert') + force_traceback = True + elif isinstance(inner_exception, RuntimeError): + self._print_error(message, evalue, 'runtime') + elif type(inner_exception) is Exception: + self._print_error(message, evalue, 'exception') + force_traceback = True + elif inner_exception is None: + self._print_error(message) + else: + self._print_error(message, evalue, str(etype)) + + if force_traceback or samba.get_debug_level() >= 3: + traceback.print_tb(etraceback, file=self.errf) + + def _create_parser(self, prog=None, epilog=None): + parser = OptionParser( + usage=self.synopsis, + description=self.full_description, + formatter=PlainHelpFormatter(), + prog=prog, + epilog=epilog, + option_class=Option) + parser.add_options(self.takes_options) + optiongroups = {} + for name in sorted(self.takes_optiongroups.keys()): + optiongroup = self.takes_optiongroups[name] + optiongroups[name] = optiongroup(parser) + parser.add_option_group(optiongroups[name]) + if self.use_colour: + parser.add_option("--color", + help="use colour if available (default: auto)", + metavar="always|never|auto", + default="auto") + + return parser, optiongroups + + def message(self, text): + self.outf.write(text + "\n") + + def _resolve(self, path, *argv, outf=None, errf=None): + """This is a leaf node, the command that will actually run.""" + self._set_files(outf, errf) + self.command_name = path + return (self, argv) + + def _run(self, *argv): + parser, optiongroups = 
self._create_parser(self.command_name) + + # Handle possible validation errors raised by parser + try: + opts, args = parser.parse_args(list(argv)) + except Exception as e: + self.show_command_error(e) + return -1 + + # Filter out options from option groups + kwargs = dict(opts.__dict__) + for option_group in parser.option_groups: + for option in option_group.option_list: + if option.dest is not None and option.dest in kwargs: + del kwargs[option.dest] + kwargs.update(optiongroups) + + if self.use_colour: + self.apply_colour_choice(kwargs.pop('color', 'auto')) + + # Check for a min a max number of allowed arguments, whenever possible + # The suffix "?" means zero or one occurrence + # The suffix "+" means at least one occurrence + # The suffix "*" means zero or more occurrences + min_args = 0 + max_args = 0 + undetermined_max_args = False + for i, arg in enumerate(self.takes_args): + if arg[-1] != "?" and arg[-1] != "*": + min_args += 1 + if arg[-1] == "+" or arg[-1] == "*": + undetermined_max_args = True + else: + max_args += 1 + if (len(args) < min_args) or (not undetermined_max_args and len(args) > max_args): + parser.print_usage() + return -1 + + self.raw_argv = list(argv) + self.raw_args = args + self.raw_kwargs = kwargs + + try: + return self.run(*args, **kwargs) + except Exception as e: + self.show_command_error(e) + return -1 + + def run(self, *args, **kwargs): + """Run the command. This should be overridden by all subclasses.""" + raise NotImplementedError(f"'{self.command_name}' run method not implemented") + + def get_logger(self, name="", verbose=False, quiet=False, **kwargs): + """Get a logger object.""" + return get_samba_logger( + name=name or self.name, stream=self.errf, + verbose=verbose, quiet=quiet, + **kwargs) + + def apply_colour_choice(self, requested): + """Heuristics to work out whether the user wants colour output, from a + --color=yes|no|auto option. This alters the ANSI 16 bit colour + "constants" in the colour module to be either real colours or empty + strings. + """ + self.requested_colour = requested + try: + colour.colour_if_wanted(self.outf, + self.errf, + hint=requested) + except ValueError as e: + raise CommandError(f"Unknown --color option: {requested} " + "please choose from always|never|auto") + + +class SuperCommand(Command): + """A samba-tool command with subcommands.""" + + synopsis = "%prog " + + subcommands = {} + + def _resolve(self, path, *args, outf=None, errf=None): + """This is an internal node. We need to consume one of the args and + find the relevant child, returning an instance of that Command. + + If there are no children, this SuperCommand will be returned + and its _run() will do a --help like thing. + """ + self.command_name = path + self._set_files(outf, errf) + + # We collect up certain option arguments and pass them to the + # leaf, which is why we iterate over args, though we really + # expect to return in the first iteration. + deferred_args = [] + + for i, a in enumerate(args): + if a in self.subcommands: + sub_args = args[i + 1:] + tuple(deferred_args) + sub_path = f'{path} {a}' + + sub = self.subcommands[a] + return sub._resolve(sub_path, *sub_args, outf=outf, errf=errf) + + elif a in ['--help', 'help', None, '-h', '-V', '--version']: + # we pass these to the leaf node. + if a == 'help': + a = '--help' + deferred_args.append(a) + continue + + # they are talking nonsense + print("%s: no such subcommand: %s\n" % (path, a), file=self.outf) + return (self, []) + + # We didn't find a subcommand, but maybe we found e.g. 
--version + print("%s: missing subcommand\n" % (path), file=self.outf) + return (self, deferred_args) + + def _run(self, *argv): + epilog = "\nAvailable subcommands:\n" + + subcmds = sorted(self.subcommands.keys()) + max_length = max([len(c) for c in subcmds]) + for cmd_name in subcmds: + cmd = self.subcommands[cmd_name] + if cmd.hidden: + continue + epilog += " %*s - %s\n" % ( + -max_length, cmd_name, cmd.short_description) + + epilog += ("For more help on a specific subcommand, please type: " + f"{self.command_name} (-h|--help)\n") + + parser, optiongroups = self._create_parser(self.command_name, epilog=epilog) + opts, args = parser.parse_args(list(argv)) + + # note: if argv had --help, parser.parse_args() will have + # already done the .print_help() and attempted to exit with + # return code 0, so we won't get here. + parser.print_help() + return -1 + + +class CommandError(Exception): + """An exception class for samba-tool Command errors.""" + + def __init__(self, message, inner_exception=None): + self.message = message + self.inner_exception = inner_exception + self.exception_info = sys.exc_info() + + def __repr__(self): + return "CommandError(%s)" % self.message diff --git a/python/samba/netcmd/common.py b/python/samba/netcmd/common.py new file mode 100644 index 0000000..2aa50c7 --- /dev/null +++ b/python/samba/netcmd/common.py @@ -0,0 +1,161 @@ +# common functions for samba-tool python commands +# +# Copyright Andrew Tridgell 2010 +# Copyright Giampaolo Lauria 2011 +# +# This program is free software; you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation; either version 3 of the License, or +# (at your option) any later version. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with this program. If not, see . +# + +import re +from samba.dcerpc import nbt +from samba.net import Net +from samba.netcmd import CommandError +import ldb + + +# In MS AD, setting a timeout to '(never)' corresponds to this value +NEVER_TIMESTAMP = int(-0x8000000000000000) + + +def _get_user_realm_domain(user, sam=None): + r""" get the realm or the domain and the base user + from user like: + * username + * DOMAIN\username + * username@REALM + + A SamDB object can also be passed in to check + our domain or realm against the obtained ones. + """ + baseuser = user + m = re.match(r"(\w+)\\(\w+$)", user) + if m: + domain = m.group(1) + baseuser = m.group(2) + + if sam is not None: + our_domain = sam.domain_netbios_name() + if domain.lower() != our_domain.lower(): + raise CommandError(f"Given domain '{domain}' does not match " + f"our domain '{our_domain}'!") + + return (baseuser.lower(), "", domain.upper()) + + realm = "" + m = re.match(r"(\w+)@(\w+)", user) + if m: + baseuser = m.group(1) + realm = m.group(2) + + if sam is not None: + our_realm = sam.domain_dns_name() + our_realm_initial = our_realm.split('.', 1)[0] + if realm.lower() != our_realm_initial.lower(): + raise CommandError(f"Given realm '{realm}' does not match our " + f"realm '{our_realm}'!") + + return (baseuser.lower(), realm.upper(), "") + + +def netcmd_dnsname(lp): + """return the full DNS name of our own host. 
Used as a default + for hostname when running status queries""" + return lp.get('netbios name').lower() + "." + lp.get('realm').lower() + + +def netcmd_finddc(lp, creds, realm=None): + """Return domain-name of a writable/ldap-capable DC for the default + domain (parameter "realm" in smb.conf) unless another realm has been + specified as argument""" + net = Net(creds=creds, lp=lp) + if realm is None: + realm = lp.get('realm') + cldap_ret = net.finddc(domain=realm, + flags=nbt.NBT_SERVER_LDAP | nbt.NBT_SERVER_DS | nbt.NBT_SERVER_WRITABLE) + return cldap_ret.pdc_dns_name + + +def netcmd_get_domain_infos_via_cldap(lp, creds, address=None): + """Return domain information (CLDAP record) of the ldap-capable + DC with the specified address""" + net = Net(creds=creds, lp=lp) + cldap_ret = net.finddc(address=address, + flags=nbt.NBT_SERVER_LDAP | nbt.NBT_SERVER_DS) + return cldap_ret + +def is_printable_attr_val(val): + import unicodedata + + # The value must be convertible to a string value. + try: + str_val = str(val) + except: + return False + + # Characters of the Unicode Character Category "C" ("Other") are + # supposed to be not printable. The category "C" includes control + # characters, format specifier and others. + for c in str_val: + if unicodedata.category(c)[0] == 'C': + return False + + return True + +def get_ldif_for_editor(samdb, msg): + + # Copy the given message, because we do not + # want to modify the original message. + m = ldb.Message() + m.dn = msg.dn + + for k in msg.keys(): + if k == "dn": + continue + vals = msg[k] + m[k] = vals + need_base64 = False + for v in vals: + if is_printable_attr_val(v): + continue + need_base64 = True + break + if not need_base64: + m[k].set_flags(ldb.FLAG_FORCE_NO_BASE64_LDIF) + + result_ldif = samdb.write_ldif(m, ldb.CHANGETYPE_NONE) + + return result_ldif + + +def timestamp_to_mins(timestamp_str): + """Converts a timestamp in -100 nanosecond units to minutes""" + # treat a timestamp of 'never' the same as zero (this should work OK for + # most settings, and it displays better than trying to convert + # -0x8000000000000000 to minutes) + if int(timestamp_str) == NEVER_TIMESTAMP: + return 0 + else: + return abs(int(timestamp_str)) / (1e7 * 60) + + +def timestamp_to_days(timestamp_str): + """Converts a timestamp in -100 nanosecond units to days""" + return timestamp_to_mins(timestamp_str) / (60 * 24) + + +def attr_default(msg, attrname, default): + """get an attribute from a ldap msg with a default""" + if attrname in msg: + return msg[attrname][0] + return default diff --git a/python/samba/netcmd/computer.py b/python/samba/netcmd/computer.py new file mode 100644 index 0000000..1413803 --- /dev/null +++ b/python/samba/netcmd/computer.py @@ -0,0 +1,729 @@ +# machine account (computer) management +# +# Copyright Bjoern Baumbch 2018 +# +# based on user management +# Copyright Jelmer Vernooij 2010 +# Copyright Theresa Halloran 2011 +# +# This program is free software; you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation; either version 3 of the License, or +# (at your option) any later version. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with this program. If not, see . 
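+#
+# The _is_valid_ip* helpers below validate address literals via
+# socket.inet_pton, e.g. (illustrative values):
+#
+#   _is_valid_ipv4('192.0.2.1')    -> True
+#   _is_valid_ipv6('2001:db8::1')  -> True
+#   _is_valid_ip('not-an-address') -> False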
+# + +import samba.getopt as options +import ldb +import socket +import samba +import re +import os +import tempfile +from samba import sd_utils +from samba.dcerpc import dnsserver, dnsp, security +from samba.dnsserver import ARecord, AAAARecord +from samba.ndr import ndr_unpack, ndr_pack, ndr_print +from samba.remove_dc import remove_dns_references +from samba.auth import system_session +from samba.samdb import SamDB +from samba.common import get_bytes +from subprocess import check_call, CalledProcessError +from . import common + +from samba import ( + credentials, + dsdb, + Ldb, + werror, + WERRORError +) + +from samba.netcmd import ( + Command, + CommandError, + SuperCommand, + Option, +) + +def _is_valid_ip(ip_string, address_families=None): + """Check ip string is valid address""" + # by default, check both ipv4 and ipv6 + if not address_families: + address_families = [socket.AF_INET, socket.AF_INET6] + + for address_family in address_families: + try: + socket.inet_pton(address_family, ip_string) + return True # if no error, return directly + except socket.error: + continue # Otherwise, check next family + return False + + +def _is_valid_ipv4(ip_string): + """Check ip string is valid ipv4 address""" + return _is_valid_ip(ip_string, address_families=[socket.AF_INET]) + + +def _is_valid_ipv6(ip_string): + """Check ip string is valid ipv6 address""" + return _is_valid_ip(ip_string, address_families=[socket.AF_INET6]) + + +def add_dns_records( + samdb, name, dns_conn, change_owner_sd, + server, ip_address_list, logger): + """Add DNS A or AAAA records while creating computer. """ + name = name.rstrip('$') + client_version = dnsserver.DNS_CLIENT_VERSION_LONGHORN + select_flags = dnsserver.DNS_RPC_VIEW_AUTHORITY_DATA | dnsserver.DNS_RPC_VIEW_NO_CHILDREN + zone = samdb.domain_dns_name() + name_found = True + sd_helper = sd_utils.SDUtils(samdb) + + try: + buflen, res = dns_conn.DnssrvEnumRecords2( + client_version, + 0, + server, + zone, + name, + None, + dnsp.DNS_TYPE_ALL, + select_flags, + None, + None, + ) + except WERRORError as e: + if e.args[0] == werror.WERR_DNS_ERROR_NAME_DOES_NOT_EXIST: + name_found = False + + if name_found: + for rec in res.rec: + for record in rec.records: + if record.wType == dnsp.DNS_TYPE_A or record.wType == dnsp.DNS_TYPE_AAAA: + # delete record + del_rec_buf = dnsserver.DNS_RPC_RECORD_BUF() + del_rec_buf.rec = record + try: + dns_conn.DnssrvUpdateRecord2( + client_version, + 0, + server, + zone, + name, + None, + del_rec_buf, + ) + except WERRORError as e: + if e.args[0] != werror.WERR_DNS_ERROR_NAME_DOES_NOT_EXIST: + raise + + for ip_address in ip_address_list: + if _is_valid_ipv6(ip_address): + logger.info("Adding DNS AAAA record %s.%s for IPv6 IP: %s" % ( + name, zone, ip_address)) + rec = AAAARecord(ip_address) + elif _is_valid_ipv4(ip_address): + logger.info("Adding DNS A record %s.%s for IPv4 IP: %s" % ( + name, zone, ip_address)) + rec = ARecord(ip_address) + else: + raise ValueError('Invalid IP: {}'.format(ip_address)) + + # Add record + add_rec_buf = dnsserver.DNS_RPC_RECORD_BUF() + add_rec_buf.rec = rec + + dns_conn.DnssrvUpdateRecord2( + client_version, + 0, + server, + zone, + name, + add_rec_buf, + None, + ) + + if (len(ip_address_list) > 0): + domaindns_zone_dn = ldb.Dn( + samdb, + 'DC=DomainDnsZones,%s' % samdb.get_default_basedn(), + ) + + dns_a_dn, ldap_record = samdb.dns_lookup( + "%s.%s" % (name, zone), + dns_partition=domaindns_zone_dn, + ) + + # Make the DC own the DNS record, not the administrator + sd_helper.modify_sd_on_dn( + dns_a_dn, + 
change_owner_sd, + controls=["sd_flags:1:%d" % (security.SECINFO_OWNER | security.SECINFO_GROUP)], + ) + + +class cmd_computer_add(Command): + """Add a new computer. + +This command adds a new computer account to the Active Directory domain. +The computername specified on the command is the sAMaccountName without the +trailing $ (dollar sign). + +Computer accounts may represent physical entities, such as workstations. Computer +accounts are also referred to as security principals and are assigned a +security identifier (SID). + +Example1: +samba-tool computer add Computer1 -H ldap://samba.samdom.example.com \\ + -Uadministrator%passw1rd + +Example1 shows how to add a new computer to the domain against a remote LDAP +server. The -H parameter is used to specify the remote target server. The -U +option is used to pass the userid and password authorized to issue the command +remotely. + +Example2: +sudo samba-tool computer add Computer2 + +Example2 shows how to add a new computer to the domain against the local +server. sudo is used so a user may run the command as root. + +Example3: +samba-tool computer add Computer3 --computerou='OU=OrgUnit' + +Example3 shows how to add a new computer in the OrgUnit organizational unit. + +""" + synopsis = "%prog [options]" + + takes_options = [ + Option("-H", "--URL", help="LDB URL for database or target server", + type=str, metavar="URL", dest="H"), + Option("--computerou", + help=("DN of alternative location (with or without domainDN " + "counterpart) to default CN=Computers in which new " + "computer object will be created. E.g. 'OU='"), + type=str), + Option("--description", help="Computer's description", type=str), + Option("--prepare-oldjoin", + help="Prepare enabled machine account for oldjoin mechanism", + action="store_true"), + Option("--ip-address", + dest='ip_address_list', + help=("IPv4 address for the computer's A record, or IPv6 " + "address for AAAA record, can be provided multiple " + "times"), + action='append'), + Option("--service-principal-name", + dest='service_principal_name_list', + help=("Computer's Service Principal Name, can be provided " + "multiple times"), + action='append') + ] + + takes_args = ["computername"] + + takes_optiongroups = { + "sambaopts": options.SambaOptions, + "credopts": options.CredentialsOptions, + "versionopts": options.VersionOptions, + } + + def run(self, computername, credopts=None, sambaopts=None, versionopts=None, + H=None, computerou=None, description=None, prepare_oldjoin=False, + ip_address_list=None, service_principal_name_list=None): + + if ip_address_list is None: + ip_address_list = [] + + if service_principal_name_list is None: + service_principal_name_list = [] + + # check each IP address if provided + for ip_address in ip_address_list: + if not _is_valid_ip(ip_address): + raise CommandError('Invalid IP address {}'.format(ip_address)) + + lp = sambaopts.get_loadparm() + creds = credopts.get_credentials(lp) + + try: + samdb = SamDB(url=H, session_info=system_session(), + credentials=creds, lp=lp) + samdb.newcomputer(computername, computerou=computerou, + description=description, + prepare_oldjoin=prepare_oldjoin, + ip_address_list=ip_address_list, + service_principal_name_list=service_principal_name_list, + ) + + if ip_address_list: + # if ip_address_list provided, then we need to create DNS + # records for this computer. 
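+                # For instance (illustrative values), adding "Computer1"
+                # with --ip-address=192.0.2.10 creates an A record for
+                # Computer1.<domain> and add_dns_records() then makes the
+                # new machine account the owner of that record.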
+ + hostname = re.sub(r"\$$", "", computername) + if hostname.count('$'): + raise CommandError('Illegal computername "%s"' % computername) + + filters = '(&(sAMAccountName={}$)(objectclass=computer))'.format( + ldb.binary_encode(hostname)) + + recs = samdb.search( + base=samdb.domain_dn(), + scope=ldb.SCOPE_SUBTREE, + expression=filters, + attrs=['primaryGroupID', 'objectSid']) + + group = recs[0]['primaryGroupID'][0] + owner = ndr_unpack(security.dom_sid, recs[0]["objectSid"][0]) + + dns_conn = dnsserver.dnsserver( + "ncacn_ip_tcp:{}[sign]".format(samdb.host_dns_name()), + lp, creds) + + change_owner_sd = security.descriptor() + change_owner_sd.owner_sid = owner + change_owner_sd.group_sid = security.dom_sid( + "{}-{}".format(samdb.get_domain_sid(), group), + ) + + add_dns_records( + samdb, hostname, dns_conn, + change_owner_sd, samdb.host_dns_name(), + ip_address_list, self.get_logger()) + except Exception as e: + raise CommandError("Failed to add computer '%s': " % + computername, e) + + self.outf.write("Computer '%s' added successfully\n" % computername) + + +class cmd_computer_delete(Command): + """Delete a computer. + +This command deletes a computer account from the Active Directory domain. The +computername specified on the command is the sAMAccountName without the +trailing $ (dollar sign). + +Once the account is deleted, all permissions and memberships associated with +that account are deleted. If a new computer account is added with the same name +as a previously deleted account name, the new computer does not have the +previous permissions. The new account computer will be assigned a new security +identifier (SID) and permissions and memberships will have to be added. + +The command may be run from the root userid or another authorized +userid. The -H or --URL= option can be used to execute the command against +a remote server. + +Example1: +samba-tool computer delete Computer1 -H ldap://samba.samdom.example.com \\ + -Uadministrator%passw1rd + +Example1 shows how to delete a computer in the domain against a remote LDAP +server. The -H parameter is used to specify the remote target server. The +--computername= and --password= options are used to pass the computername and +password of a computer that exists on the remote server and is authorized to +issue the command on that server. + +Example2: +sudo samba-tool computer delete Computer2 + +Example2 shows how to delete a computer in the domain against the local server. +sudo is used so a computer may run the command as root. 
+ +""" + synopsis = "%prog [options]" + + takes_options = [ + Option("-H", "--URL", help="LDB URL for database or target server", + type=str, metavar="URL", dest="H"), + ] + + takes_args = ["computername"] + takes_optiongroups = { + "sambaopts": options.SambaOptions, + "credopts": options.CredentialsOptions, + "versionopts": options.VersionOptions, + } + + def run(self, computername, credopts=None, sambaopts=None, + versionopts=None, H=None): + lp = sambaopts.get_loadparm() + creds = credopts.get_credentials(lp, fallback_machine=True) + + samdb = SamDB(url=H, session_info=system_session(), + credentials=creds, lp=lp) + + samaccountname = computername + if not computername.endswith('$'): + samaccountname = "%s$" % computername + + filter = ("(&(sAMAccountName=%s)(sAMAccountType=%u))" % + (ldb.binary_encode(samaccountname), + dsdb.ATYPE_WORKSTATION_TRUST)) + try: + res = samdb.search(base=samdb.domain_dn(), + scope=ldb.SCOPE_SUBTREE, + expression=filter, + attrs=["userAccountControl", "dNSHostName"]) + computer_dn = res[0].dn + computer_ac = int(res[0]["userAccountControl"][0]) + if "dNSHostName" in res[0]: + computer_dns_host_name = str(res[0]["dNSHostName"][0]) + else: + computer_dns_host_name = None + except IndexError: + raise CommandError('Unable to find computer "%s"' % computername) + + computer_is_workstation = ( + computer_ac & dsdb.UF_WORKSTATION_TRUST_ACCOUNT) + if not computer_is_workstation: + raise CommandError('Failed to remove computer "%s": ' + 'Computer is not a workstation - removal denied' + % computername) + try: + samdb.delete(computer_dn) + if computer_dns_host_name: + remove_dns_references( + samdb, self.get_logger(), computer_dns_host_name, + ignore_no_name=True) + except Exception as e: + raise CommandError('Failed to remove computer "%s"' % + samaccountname, e) + self.outf.write("Deleted computer %s\n" % computername) + + +class cmd_computer_edit(Command): + """Modify Computer AD object. + + This command will allow editing of a computer account in the Active + Directory domain. You will then be able to add or change attributes and + their values. + + The computername specified on the command is the sAMaccountName with or + without the trailing $ (dollar sign). + + The command may be run from the root userid or another authorized userid. + + The -H or --URL= option can be used to execute the command against a remote + server. + + Example1: + samba-tool computer edit Computer1 -H ldap://samba.samdom.example.com \\ + -U administrator --password=passw1rd + + Example1 shows how to edit a computers attributes in the domain against a + remote LDAP server. + + The -H parameter is used to specify the remote target server. + + Example2: + samba-tool computer edit Computer2 + + Example2 shows how to edit a computers attributes in the domain against a + local LDAP server. + + Example3: + samba-tool computer edit Computer3 --editor=nano + + Example3 shows how to edit a computers attributes in the domain against a + local LDAP server using the 'nano' editor. 
+ """ + synopsis = "%prog [options]" + + takes_options = [ + Option("-H", "--URL", help="LDB URL for database or target server", + type=str, metavar="URL", dest="H"), + Option("--editor", help="Editor to use instead of the system default," + " or 'vi' if no system default is set.", type=str), + ] + + takes_args = ["computername"] + takes_optiongroups = { + "sambaopts": options.SambaOptions, + "credopts": options.CredentialsOptions, + "versionopts": options.VersionOptions, + } + + def run(self, computername, credopts=None, sambaopts=None, versionopts=None, + H=None, editor=None): + lp = sambaopts.get_loadparm() + creds = credopts.get_credentials(lp, fallback_machine=True) + samdb = SamDB(url=H, session_info=system_session(), + credentials=creds, lp=lp) + + samaccountname = computername + if not computername.endswith('$'): + samaccountname = "%s$" % computername + + filter = ("(&(sAMAccountType=%d)(sAMAccountName=%s))" % + (dsdb.ATYPE_WORKSTATION_TRUST, + ldb.binary_encode(samaccountname))) + + domaindn = samdb.domain_dn() + + try: + res = samdb.search(base=domaindn, + expression=filter, + scope=ldb.SCOPE_SUBTREE) + computer_dn = res[0].dn + except IndexError: + raise CommandError('Unable to find computer "%s"' % (computername)) + + if len(res) != 1: + raise CommandError('Invalid number of results: for "%s": %d' % + ((computername), len(res))) + + msg = res[0] + result_ldif = common.get_ldif_for_editor(samdb, msg) + + if editor is None: + editor = os.environ.get('EDITOR') + if editor is None: + editor = 'vi' + + with tempfile.NamedTemporaryFile(suffix=".tmp") as t_file: + t_file.write(get_bytes(result_ldif)) + t_file.flush() + try: + check_call([editor, t_file.name]) + except CalledProcessError as e: + raise CalledProcessError("ERROR: ", e) + with open(t_file.name) as edited_file: + edited_message = edited_file.read() + + msgs_edited = samdb.parse_ldif(edited_message) + msg_edited = next(msgs_edited)[1] + + res_msg_diff = samdb.msg_diff(msg, msg_edited) + if len(res_msg_diff) == 0: + self.outf.write("Nothing to do\n") + return + + try: + samdb.modify(res_msg_diff) + except Exception as e: + raise CommandError("Failed to modify computer '%s': " % + computername, e) + + self.outf.write("Modified computer '%s' successfully\n" % computername) + +class cmd_computer_list(Command): + """List all computers.""" + + synopsis = "%prog [options]" + + takes_options = [ + Option("-H", "--URL", help="LDB URL for database or target server", + type=str, metavar="URL", dest="H"), + Option("-b", "--base-dn", + help="Specify base DN to use", + type=str), + Option("--full-dn", dest="full_dn", + default=False, + action="store_true", + help="Display DN instead of the sAMAccountName.") + ] + + takes_optiongroups = { + "sambaopts": options.SambaOptions, + "credopts": options.CredentialsOptions, + "versionopts": options.VersionOptions, + } + + def run(self, + sambaopts=None, + credopts=None, + versionopts=None, + H=None, + base_dn=None, + full_dn=False): + lp = sambaopts.get_loadparm() + creds = credopts.get_credentials(lp, fallback_machine=True) + + samdb = SamDB(url=H, session_info=system_session(), + credentials=creds, lp=lp) + + filter = "(sAMAccountType=%u)" % (dsdb.ATYPE_WORKSTATION_TRUST) + + search_dn = samdb.domain_dn() + if base_dn: + search_dn = samdb.normalize_dn_in_domain(base_dn) + + res = samdb.search(search_dn, + scope=ldb.SCOPE_SUBTREE, + expression=filter, + attrs=["samaccountname"]) + if (len(res) == 0): + return + + for msg in res: + if full_dn: + self.outf.write("%s\n" % msg.get("dn")) + continue 
+ + self.outf.write("%s\n" % msg.get("samaccountname", idx=0)) + + +class cmd_computer_show(Command): + """Display a computer AD object. + +This command displays a computer account and it's attributes in the Active +Directory domain. +The computername specified on the command is the sAMAccountName. + +The command may be run from the root userid or another authorized +userid. + +The -H or --URL= option can be used to execute the command against a remote +server. + +Example1: +samba-tool computer show Computer1 -H ldap://samba.samdom.example.com \\ + -U administrator + +Example1 shows how display a computers attributes in the domain against a +remote LDAP server. + +The -H parameter is used to specify the remote target server. + +Example2: +samba-tool computer show Computer2 + +Example2 shows how to display a computers attributes in the domain against a +local LDAP server. + +Example3: +samba-tool computer show Computer2 --attributes=objectSid,operatingSystem + +Example3 shows how to display a computers objectSid and operatingSystem +attribute. +""" + synopsis = "%prog [options]" + + takes_options = [ + Option("-H", "--URL", help="LDB URL for database or target server", + type=str, metavar="URL", dest="H"), + Option("--attributes", + help=("Comma separated list of attributes, " + "which will be printed."), + type=str, dest="computer_attrs"), + ] + + takes_args = ["computername"] + takes_optiongroups = { + "sambaopts": options.SambaOptions, + "credopts": options.CredentialsOptions, + "versionopts": options.VersionOptions, + } + + def run(self, computername, credopts=None, sambaopts=None, versionopts=None, + H=None, computer_attrs=None): + + lp = sambaopts.get_loadparm() + creds = credopts.get_credentials(lp, fallback_machine=True) + samdb = SamDB(url=H, session_info=system_session(), + credentials=creds, lp=lp) + + attrs = None + if computer_attrs: + attrs = computer_attrs.split(",") + + samaccountname = computername + if not computername.endswith('$'): + samaccountname = "%s$" % computername + + filter = ("(&(sAMAccountType=%d)(sAMAccountName=%s))" % + (dsdb.ATYPE_WORKSTATION_TRUST, + ldb.binary_encode(samaccountname))) + + domaindn = samdb.domain_dn() + + try: + res = samdb.search(base=domaindn, expression=filter, + scope=ldb.SCOPE_SUBTREE, attrs=attrs) + computer_dn = res[0].dn + except IndexError: + raise CommandError('Unable to find computer "%s"' % + samaccountname) + + for msg in res: + computer_ldif = common.get_ldif_for_editor(samdb, msg) + self.outf.write(computer_ldif) + + +class cmd_computer_move(Command): + """Move a computer to an organizational unit/container.""" + + synopsis = "%prog [options]" + + takes_options = [ + Option("-H", "--URL", help="LDB URL for database or target server", + type=str, metavar="URL", dest="H"), + ] + + takes_args = ["computername", "new_ou_dn"] + takes_optiongroups = { + "sambaopts": options.SambaOptions, + "credopts": options.CredentialsOptions, + "versionopts": options.VersionOptions, + } + + def run(self, computername, new_ou_dn, credopts=None, sambaopts=None, + versionopts=None, H=None): + lp = sambaopts.get_loadparm() + creds = credopts.get_credentials(lp, fallback_machine=True) + samdb = SamDB(url=H, session_info=system_session(), + credentials=creds, lp=lp) + domain_dn = ldb.Dn(samdb, samdb.domain_dn()) + + samaccountname = computername + if not computername.endswith('$'): + samaccountname = "%s$" % computername + + filter = ("(&(sAMAccountName=%s)(sAMAccountType=%u))" % + (ldb.binary_encode(samaccountname), + dsdb.ATYPE_WORKSTATION_TRUST)) + 
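+        # Locate the existing machine account first; its RDN is kept
+        # and only the parent is changed to the requested OU below.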
+        try:
+            res = samdb.search(base=domain_dn,
+                               expression=filter,
+                               scope=ldb.SCOPE_SUBTREE)
+            computer_dn = res[0].dn
+        except IndexError:
+            raise CommandError('Unable to find computer "%s"' % (computername))
+
+        full_new_ou_dn = ldb.Dn(samdb, new_ou_dn)
+        if not full_new_ou_dn.is_child_of(domain_dn):
+            full_new_ou_dn.add_base(domain_dn)
+        new_computer_dn = ldb.Dn(samdb, str(computer_dn))
+        new_computer_dn.remove_base_components(len(computer_dn) - 1)
+        new_computer_dn.add_base(full_new_ou_dn)
+        try:
+            samdb.rename(computer_dn, new_computer_dn)
+        except Exception as e:
+            raise CommandError('Failed to move computer "%s"' % computername, e)
+        self.outf.write('Moved computer "%s" to "%s"\n' %
+                        (computername, new_ou_dn))
+
+
+class cmd_computer(SuperCommand):
+    """Computer management."""
+
+    subcommands = {}
+    subcommands["add"] = cmd_computer_add()
+    subcommands["create"] = cmd_computer_add()
+    subcommands["delete"] = cmd_computer_delete()
+    subcommands["edit"] = cmd_computer_edit()
+    subcommands["list"] = cmd_computer_list()
+    subcommands["show"] = cmd_computer_show()
+    subcommands["move"] = cmd_computer_move()
diff --git a/python/samba/netcmd/contact.py b/python/samba/netcmd/contact.py
new file mode 100644
index 0000000..064a3ce
--- /dev/null
+++ b/python/samba/netcmd/contact.py
@@ -0,0 +1,861 @@
+# samba-tool contact management
+#
+# Copyright Bjoern Baumbach 2019
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program.  If not, see <http://www.gnu.org/licenses/>.
+#
+
+import samba.getopt as options
+import ldb
+import os
+import tempfile
+from subprocess import check_call, CalledProcessError
+from operator import attrgetter
+from samba.auth import system_session
+from samba.samdb import SamDB
+from samba import (
+    credentials,
+    dsdb,
+)
+from samba.net import Net
+
+from samba.netcmd import (
+    Command,
+    CommandError,
+    SuperCommand,
+    Option,
+)
+from samba.common import get_bytes
+from . import common
+
+
+class cmd_add(Command):
+    """Add a new contact.
+
+    This command adds a new contact to the Active Directory domain.
+
+    The name of the new contact can be specified by the first argument
+    'contactname' or the --given-name, --initials and --surname arguments.
+    If no 'contactname' is given, the contact's name will be made up of the
+    given arguments by combining the given-name, initials and surname. Each
+    argument is optional. A dot ('.') will be appended to the initials
+    automatically.
+
+    Example1:
+    samba-tool contact add "James T. Kirk" --job-title=Captain \\
+        -H ldap://samba.samdom.example.com -UAdministrator%Passw1rd
+
+    The example shows how to add a new contact to the domain against a remote
+    LDAP server.
+
+    Example2:
+    samba-tool contact add --given-name=James --initials=T --surname=Kirk
+
+    The example shows how to add a new contact to the domain against a local
+    server. The resulting name is "James T. Kirk".
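+
+    Example3 (an illustrative sketch; the OU name is hypothetical):
+    samba-tool contact add "Jane Doe" --ou='OU=people'
+
+    Example3 shows how the --ou option could be used to create the new
+    contact inside a 'people' organizational unit instead of the default
+    domain base.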
+ """ + + synopsis = "%prog [contactname] [options]" + + takes_options = [ + Option("-H", "--URL", help="LDB URL for database or target server", + type=str, metavar="URL", dest="H"), + Option("--ou", + help=("DN of alternative location (with or without domainDN " + "counterpart) in which the new contact will be created. " + "E.g. 'OU='. " + "Default is the domain base."), + type=str), + Option("--surname", help="Contact's surname", type=str), + Option("--given-name", help="Contact's given name", type=str), + Option("--initials", help="Contact's initials", type=str), + Option("--display-name", help="Contact's display name", type=str), + Option("--job-title", help="Contact's job title", type=str), + Option("--department", help="Contact's department", type=str), + Option("--company", help="Contact's company", type=str), + Option("--description", help="Contact's description", type=str), + Option("--mail-address", help="Contact's email address", type=str), + Option("--internet-address", help="Contact's home page", type=str), + Option("--telephone-number", help="Contact's phone number", type=str), + Option("--mobile-number", + help="Contact's mobile phone number", + type=str), + Option("--physical-delivery-office", + help="Contact's office location", + type=str), + ] + + takes_args = ["fullcontactname?"] + + takes_optiongroups = { + "sambaopts": options.SambaOptions, + "credopts": options.CredentialsOptions, + "versionopts": options.VersionOptions, + } + + def run(self, + fullcontactname=None, + sambaopts=None, + credopts=None, + versionopts=None, + H=None, + ou=None, + surname=None, + given_name=None, + initials=None, + display_name=None, + job_title=None, + department=None, + company=None, + description=None, + mail_address=None, + internet_address=None, + telephone_number=None, + mobile_number=None, + physical_delivery_office=None): + + lp = sambaopts.get_loadparm() + creds = credopts.get_credentials(lp) + + try: + samdb = SamDB(url=H, + session_info=system_session(), + credentials=creds, + lp=lp) + ret_name = samdb.newcontact( + fullcontactname=fullcontactname, + ou=ou, + surname=surname, + givenname=given_name, + initials=initials, + displayname=display_name, + jobtitle=job_title, + department=department, + company=company, + description=description, + mailaddress=mail_address, + internetaddress=internet_address, + telephonenumber=telephone_number, + mobilenumber=mobile_number, + physicaldeliveryoffice=physical_delivery_office) + except Exception as e: + raise CommandError("Failed to add contact", e) + + self.outf.write("Contact '%s' added successfully\n" % ret_name) + + +class cmd_delete(Command): + """Delete a contact. + + This command deletes a contact object from the Active Directory domain. + + The contactname specified on the command is the common name or the + distinguished name of the contact object. The distinguished name of the + contact can be specified with or without the domainDN component. + + Example: + samba-tool contact delete Contact1 \\ + -H ldap://samba.samdom.example.com \\ + --username=Administrator --password=Passw1rd + + The example shows how to delete a contact in the domain against a remote + LDAP server. 
+ """ + synopsis = "%prog [options]" + + takes_options = [ + Option("-H", + "--URL", + help="LDB URL for database or target server", + type=str, + metavar="URL", + dest="H"), + ] + + takes_args = ["contactname"] + + takes_optiongroups = { + "sambaopts": options.SambaOptions, + "credopts": options.CredentialsOptions, + "versionopts": options.VersionOptions, + } + + def run(self, + contactname, + sambaopts=None, + credopts=None, + versionopts=None, + H=None): + lp = sambaopts.get_loadparm() + creds = credopts.get_credentials(lp, fallback_machine=True) + samdb = SamDB(url=H, + session_info=system_session(), + credentials=creds, + lp=lp) + base_dn = samdb.domain_dn() + scope = ldb.SCOPE_SUBTREE + + filter = ("(&(objectClass=contact)(name=%s))" % + ldb.binary_encode(contactname)) + + if contactname.upper().startswith("CN="): + # contact is specified by DN + filter = "(objectClass=contact)" + scope = ldb.SCOPE_BASE + try: + base_dn = samdb.normalize_dn_in_domain(contactname) + except Exception as e: + raise CommandError('Invalid dn "%s": %s' % + (contactname, e)) + + try: + res = samdb.search(base=base_dn, + scope=scope, + expression=filter, + attrs=["dn"]) + contact_dn = res[0].dn + except IndexError: + raise CommandError('Unable to find contact "%s"' % (contactname)) + + if len(res) > 1: + for msg in sorted(res, key=attrgetter('dn')): + self.outf.write("found: %s\n" % msg.dn) + raise CommandError("Multiple results for contact '%s'\n" + "Please specify the contact's full DN" % + contactname) + + try: + samdb.delete(contact_dn) + except Exception as e: + raise CommandError('Failed to remove contact "%s"' % contactname, e) + self.outf.write("Deleted contact %s\n" % contactname) + + +class cmd_list(Command): + """List all contacts. + """ + + synopsis = "%prog [options]" + + takes_options = [ + Option("-H", + "--URL", + help="LDB URL for database or target server", + type=str, + metavar="URL", + dest="H"), + Option("-b", "--base-dn", + help="Specify base DN to use.", + type=str), + Option("--full-dn", + dest="full_dn", + default=False, + action='store_true', + help="Display contact's full DN instead of the name."), + ] + + takes_optiongroups = { + "sambaopts": options.SambaOptions, + "credopts": options.CredentialsOptions, + "versionopts": options.VersionOptions, + } + + def run(self, + sambaopts=None, + credopts=None, + versionopts=None, + H=None, + base_dn=None, + full_dn=False): + lp = sambaopts.get_loadparm() + creds = credopts.get_credentials(lp, fallback_machine=True) + + samdb = SamDB(url=H, + session_info=system_session(), + credentials=creds, + lp=lp) + + search_dn = samdb.domain_dn() + if base_dn: + search_dn = samdb.normalize_dn_in_domain(base_dn) + + res = samdb.search(search_dn, + scope=ldb.SCOPE_SUBTREE, + expression="(objectClass=contact)", + attrs=["name"]) + if (len(res) == 0): + return + + if full_dn: + for msg in sorted(res, key=attrgetter('dn')): + self.outf.write("%s\n" % msg.dn) + return + + for msg in res: + contact_name = msg.get("name", idx=0) + + self.outf.write("%s\n" % contact_name) + + +class cmd_edit(Command): + """Modify a contact. + + This command will allow editing of a contact object in the Active Directory + domain. You will then be able to add or change attributes and their values. + + The contactname specified on the command is the common name or the + distinguished name of the contact object. The distinguished name of the + contact can be specified with or without the domainDN component. 
+
+    The command may be run from the root userid or another authorized userid.
+
+    The -H or --URL= option can be used to execute the command against a remote
+    server.
+
+    Example1:
+    samba-tool contact edit Contact1 -H ldap://samba.samdom.example.com \\
+        -U Administrator --password=Passw1rd
+
+    Example1 shows how to edit a contact's attributes in the domain against a
+    remote LDAP server.
+
+    The -H parameter is used to specify the remote target server.
+
+    Example2:
+    samba-tool contact edit CN=Contact2,OU=people,DC=samdom,DC=example,DC=com
+
+    Example2 shows how to edit a contact's attributes in the domain against a
+    local server. The contact, which is located in the 'people' OU,
+    is specified by the full distinguished name.
+
+    Example3:
+    samba-tool contact edit Contact3 --editor=nano
+
+    Example3 shows how to edit a contact's attributes in the domain against a
+    local server using the 'nano' editor.
+    """
+    synopsis = "%prog <contactname> [options]"
+
+    takes_options = [
+        Option("-H",
+               "--URL",
+               help="LDB URL for database or target server",
+               type=str,
+               metavar="URL",
+               dest="H"),
+        Option("--editor",
+               help="Editor to use instead of the system default, "
+                    "or 'vi' if no system default is set.",
+               type=str),
+    ]
+
+    takes_args = ["contactname"]
+    takes_optiongroups = {
+        "sambaopts": options.SambaOptions,
+        "credopts": options.CredentialsOptions,
+        "versionopts": options.VersionOptions,
+    }
+
+    def run(self,
+            contactname,
+            sambaopts=None,
+            credopts=None,
+            versionopts=None,
+            H=None,
+            editor=None):
+        lp = sambaopts.get_loadparm()
+        creds = credopts.get_credentials(lp, fallback_machine=True)
+        samdb = SamDB(url=H, session_info=system_session(),
+                      credentials=creds, lp=lp)
+        base_dn = samdb.domain_dn()
+        scope = ldb.SCOPE_SUBTREE
+
+        filter = ("(&(objectClass=contact)(name=%s))" %
+                  ldb.binary_encode(contactname))
+
+        if contactname.upper().startswith("CN="):
+            # contact is specified by DN
+            filter = "(objectClass=contact)"
+            scope = ldb.SCOPE_BASE
+            try:
+                base_dn = samdb.normalize_dn_in_domain(contactname)
+            except Exception as e:
+                raise CommandError('Invalid dn "%s": %s' %
+                                   (contactname, e))
+
+        try:
+            res = samdb.search(base=base_dn,
+                               scope=scope,
+                               expression=filter)
+            contact_dn = res[0].dn
+        except IndexError:
+            raise CommandError('Unable to find contact "%s"' % (contactname))
+
+        if len(res) > 1:
+            for msg in sorted(res, key=attrgetter('dn')):
+                self.outf.write("found: %s\n" % msg.dn)
+            raise CommandError("Multiple results for contact '%s'\n"
+                               "Please specify the contact's full DN" %
+                               contactname)
+
+        for msg in res:
+            result_ldif = common.get_ldif_for_editor(samdb, msg)
+
+            if editor is None:
+                editor = os.environ.get('EDITOR')
+                if editor is None:
+                    editor = 'vi'
+
+            with tempfile.NamedTemporaryFile(suffix=".tmp") as t_file:
+                t_file.write(get_bytes(result_ldif))
+                t_file.flush()
+                try:
+                    check_call([editor, t_file.name])
+                except CalledProcessError as e:
+                    raise CommandError("Failed to run editor '%s'" % editor, e)
+                with open(t_file.name) as edited_file:
+                    edited_message = edited_file.read()
+
+            msgs_edited = samdb.parse_ldif(edited_message)
+            msg_edited = next(msgs_edited)[1]
+
+            res_msg_diff = samdb.msg_diff(msg, msg_edited)
+            if len(res_msg_diff) == 0:
+                self.outf.write("Nothing to do\n")
+                return
+
+            try:
+                samdb.modify(res_msg_diff)
+            except Exception as e:
+                raise CommandError("Failed to modify contact '%s': " % contactname,
+                                   e)
+
+        self.outf.write("Modified contact '%s' successfully\n" % contactname)
+
+
+class cmd_show(Command):
+    """Display a contact.
+
+    This command displays a contact object with its attributes in the Active
+    Directory domain.
+
+    The contactname specified on the command is the common name or the
+    distinguished name of the contact object. The distinguished name of the
+    contact can be specified with or without the domainDN component.
+
+    The command may be run from the root userid or another authorized userid.
+
+    The -H or --URL= option can be used to execute the command against a remote
+    server.
+
+    Example1:
+    samba-tool contact show Contact1 -H ldap://samba.samdom.example.com \\
+        -U Administrator --password=Passw1rd
+
+    Example1 shows how to display a contact's attributes in the domain against
+    a remote LDAP server.
+
+    The -H parameter is used to specify the remote target server.
+
+    Example2:
+    samba-tool contact show CN=Contact2,OU=people,DC=samdom,DC=example,DC=com
+
+    Example2 shows how to display a contact's attributes in the domain against
+    a local server. The contact, which is located in the 'people' OU, is
+    specified by the full distinguished name.
+
+    Example3:
+    samba-tool contact show Contact3 --attributes=mail,mobile
+
+    Example3 shows how to display a contact's mail and mobile attributes.
+    """
+    synopsis = "%prog <contactname> [options]"
+
+    takes_options = [
+        Option("-H",
+               "--URL",
+               help="LDB URL for database or target server",
+               type=str,
+               metavar="URL",
+               dest="H"),
+        Option("--attributes",
+               help=("Comma separated list of attributes, "
+                     "which will be printed."),
+               type=str,
+               dest="contact_attrs"),
+    ]
+
+    takes_args = ["contactname"]
+    takes_optiongroups = {
+        "sambaopts": options.SambaOptions,
+        "credopts": options.CredentialsOptions,
+        "versionopts": options.VersionOptions,
+    }
+
+    def run(self,
+            contactname,
+            sambaopts=None,
+            credopts=None,
+            versionopts=None,
+            H=None,
+            contact_attrs=None):
+
+        lp = sambaopts.get_loadparm()
+        creds = credopts.get_credentials(lp, fallback_machine=True)
+        samdb = SamDB(url=H,
+                      session_info=system_session(),
+                      credentials=creds,
+                      lp=lp)
+        base_dn = samdb.domain_dn()
+        scope = ldb.SCOPE_SUBTREE
+
+        attrs = None
+        if contact_attrs:
+            attrs = contact_attrs.split(",")
+
+        filter = ("(&(objectClass=contact)(name=%s))" %
+                  ldb.binary_encode(contactname))
+
+        if contactname.upper().startswith("CN="):
+            # contact is specified by DN
+            filter = "(objectClass=contact)"
+            scope = ldb.SCOPE_BASE
+            try:
+                base_dn = samdb.normalize_dn_in_domain(contactname)
+            except Exception as e:
+                raise CommandError('Invalid dn "%s": %s' %
+                                   (contactname, e))
+
+        try:
+            res = samdb.search(base=base_dn,
+                               expression=filter,
+                               scope=scope,
+                               attrs=attrs)
+            contact_dn = res[0].dn
+        except IndexError:
+            raise CommandError('Unable to find contact "%s"' % (contactname))
+
+        if len(res) > 1:
+            for msg in sorted(res, key=attrgetter('dn')):
+                self.outf.write("found: %s\n" % msg.dn)
+            raise CommandError("Multiple results for contact '%s'\n"
+                               "Please specify the contact's DN" %
+                               contactname)
+
+        for msg in res:
+            contact_ldif = common.get_ldif_for_editor(samdb, msg)
+            self.outf.write(contact_ldif)
+
+
+class cmd_move(Command):
+    """Move a contact object to an organizational unit or container.
+
+    The contactname specified on the command is the common name or the
+    distinguished name of the contact object. The distinguished name of the
+    contact can be specified with or without the domainDN component.
+
+    The name of the organizational unit or container can be specified as the
+    distinguished name, with or without the domainDN component.
+
+    The command may be run from the root userid or another authorized userid.
+
+    The -H or --URL= option can be used to execute the command against a remote
+    server.
+
+    Example1:
+    samba-tool contact move Contact1 'OU=people' \\
+        -H ldap://samba.samdom.example.com -U Administrator
+
+    Example1 shows how to move a contact Contact1 into the 'people'
+    organizational unit on a remote LDAP server.
+
+    The -H parameter is used to specify the remote target server.
+
+    Example2:
+    samba-tool contact move Contact1 OU=Contacts,DC=samdom,DC=example,DC=com
+
+    Example2 shows how to move a contact Contact1 into the OU=Contacts
+    organizational unit on the local server.
+    """
+
+    synopsis = "%prog <contactname> <new_parent_dn> [options]"
+
+    takes_options = [
+        Option("-H",
+               "--URL",
+               help="LDB URL for database or target server",
+               type=str,
+               metavar="URL",
+               dest="H"),
+    ]
+
+    takes_args = ["contactname", "new_parent_dn"]
+    takes_optiongroups = {
+        "sambaopts": options.SambaOptions,
+        "credopts": options.CredentialsOptions,
+        "versionopts": options.VersionOptions,
+    }
+
+    def run(self,
+            contactname,
+            new_parent_dn,
+            sambaopts=None,
+            credopts=None,
+            versionopts=None,
+            H=None):
+        lp = sambaopts.get_loadparm()
+        creds = credopts.get_credentials(lp, fallback_machine=True)
+        samdb = SamDB(url=H,
+                      session_info=system_session(),
+                      credentials=creds,
+                      lp=lp)
+        base_dn = samdb.domain_dn()
+        scope = ldb.SCOPE_SUBTREE
+
+        filter = ("(&(objectClass=contact)(name=%s))" %
+                  ldb.binary_encode(contactname))
+
+        if contactname.upper().startswith("CN="):
+            # contact is specified by DN
+            filter = "(objectClass=contact)"
+            scope = ldb.SCOPE_BASE
+            try:
+                base_dn = samdb.normalize_dn_in_domain(contactname)
+            except Exception as e:
+                raise CommandError('Invalid dn "%s": %s' %
+                                   (contactname, e))
+
+        try:
+            res = samdb.search(base=base_dn,
+                               scope=scope,
+                               expression=filter,
+                               attrs=["dn"])
+            contact_dn = res[0].dn
+        except IndexError:
+            raise CommandError('Unable to find contact "%s"' % (contactname))
+
+        if len(res) > 1:
+            for msg in sorted(res, key=attrgetter('dn')):
+                self.outf.write("found: %s\n" % msg.dn)
+            raise CommandError("Multiple results for contact '%s'\n"
+                               "Please specify the contact's full DN" %
+                               contactname)
+
+        try:
+            full_new_parent_dn = samdb.normalize_dn_in_domain(new_parent_dn)
+        except Exception as e:
+            raise CommandError('Invalid new_parent_dn "%s": %s' %
+                               (new_parent_dn, e))
+
+        full_new_contact_dn = ldb.Dn(samdb, str(contact_dn))
+        full_new_contact_dn.remove_base_components(len(contact_dn) - 1)
+        full_new_contact_dn.add_base(full_new_parent_dn)
+
+        try:
+            samdb.rename(contact_dn, full_new_contact_dn)
+        except Exception as e:
+            raise CommandError('Failed to move contact "%s"' % contactname, e)
+        self.outf.write('Moved contact "%s" into "%s"\n' %
+                        (contactname, full_new_parent_dn))
+
+
+class cmd_rename(Command):
+    """Rename a contact and related attributes.
+
+    This command allows you to set the contact's name-related attributes.
+    The contact's new CN will be made up by combining the given-name, initials
+    and surname. A dot ('.') will be appended to the initials automatically, if
+    required.
+    Use the --force-new-cn option to specify the new CN manually and the
+    --reset-cn option to reset these changes.
+
+    Use an empty attribute value to remove the specified attribute.
+
+    The contactname specified on the command is the CN.
+
+    The command may be run locally from the root userid or another authorized
+    userid.
+
+    The -H or --URL= option can be used to execute the command against a remote
+    server.
+
+    Example1:
+    samba-tool contact rename "John Doe" --surname=Bloggs \\
+        --force-new-cn=John
+
+    Example1 shows how to change the surname ('sn' attribute) of a contact
+    'John Doe' to 'Bloggs' and change the CN to 'John' on the local server.
+
+    Example2:
+    samba-tool contact rename "J Doe" --given-name=John \\
+        -H ldap://samba.samdom.example.com -U administrator
+
+    Example2 shows how to rename the given name of a contact 'J Doe' to
+    'John'. The contact's CN will be renamed automatically, based on
+    the given name, initials and surname, if the previous CN is the
+    standard combination of the previous name attributes.
+    The -H parameter is used to specify the remote target server.
+    """
+
+    synopsis = "%prog <contactname> [options]"
+
+    takes_options = [
+        Option("-H", "--URL",
+               help="LDB URL for database or target server",
+               type=str, metavar="URL", dest="H"),
+        Option("--surname",
+               help="New surname",
+               type=str),
+        Option("--given-name",
+               help="New given name",
+               type=str),
+        Option("--initials",
+               help="New initials",
+               type=str),
+        Option("--force-new-cn",
+               help="Specify a new CN (RDN) instead of using a combination "
+                    "of the given name, initials and surname.",
+               type=str, metavar="NEW_CN"),
+        Option("--reset-cn",
+               help="Set the CN (RDN) to the combination of the given name, "
+                    "initials and surname. Use this option to reset "
+                    "the changes made with the --force-new-cn option.",
+               action="store_true"),
+        Option("--display-name",
+               help="New display name",
+               type=str),
+        Option("--mail-address",
+               help="New email address",
+               type=str),
+    ]
+
+    takes_args = ["contactname"]
+    takes_optiongroups = {
+        "sambaopts": options.SambaOptions,
+        "credopts": options.CredentialsOptions,
+        "versionopts": options.VersionOptions,
+    }
+
+    def run(self, contactname, credopts=None, sambaopts=None, versionopts=None,
+            H=None, surname=None, given_name=None, initials=None, force_new_cn=None,
+            display_name=None, mail_address=None, reset_cn=None):
+        # illegal options
+        if force_new_cn and reset_cn:
+            raise CommandError("It is not allowed to specify --force-new-cn "
+                               "together with --reset-cn.")
+        if force_new_cn == "":
+            raise CommandError("Failed to rename contact - delete protected "
+                               "attribute 'CN'")
+
+        lp = sambaopts.get_loadparm()
+        creds = credopts.get_credentials(lp, fallback_machine=True)
+        samdb = SamDB(url=H, session_info=system_session(),
+                      credentials=creds, lp=lp)
+        domain_dn = ldb.Dn(samdb, samdb.domain_dn())
+
+        filter = ("(&(objectClass=contact)(name=%s))" %
+                  ldb.binary_encode(contactname))
+        try:
+            res = samdb.search(base=domain_dn,
+                               scope=ldb.SCOPE_SUBTREE,
+                               expression=filter,
+                               attrs=["name",
+                                      "sn",
+                                      "givenName",
+                                      "cn",
+                                      "initials",
+                                      "displayName",
+                                      "mail"]
+                               )
+            old_contact = res[0]
+            contact_dn = old_contact.dn
+        except IndexError:
+            raise CommandError('Unable to find contact "%s"' % (contactname))
+
+        contact_parent_dn = contact_dn.parent()
+        old_cn = old_contact["cn"][0]
+
+        if force_new_cn is not None:
+            new_cn = force_new_cn
+        else:
+            new_cn = samdb.fullname_from_names(old_attrs=old_contact,
+                                               given_name=given_name,
+                                               initials=initials,
+                                               surname=surname)
+
+        # change the CN if the new CN is different and the old CN is the
+        # standard CN, or if the change is forced with --force-new-cn or
+        # --reset-cn
+        expected_cn = samdb.fullname_from_names(old_attrs=old_contact)
+        must_change_cn = str(old_cn) != str(new_cn) and \
+            (str(old_cn) == str(expected_cn) or
+             reset_cn or bool(force_new_cn))
+
+        new_contact_dn = ldb.Dn(samdb, "CN=%s" % new_cn)
+        new_contact_dn.add_base(contact_parent_dn)
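+        # A worked illustration (hypothetical values): given_name "John" and
+        # surname "Bloggs" make fullname_from_names() return "John Bloggs",
+        # so for a contact under OU=people,DC=samdom,DC=example,DC=com the
+        # DN built above is
+        #     CN=John Bloggs,OU=people,DC=samdom,DC=example,DC=com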
+
+        if new_cn == "" and must_change_cn:
+            raise CommandError("Failed to rename contact '%s' - "
+                               "can not set an empty CN "
+                               "(please use --force-new-cn to specify a "
+                               "different CN or --given-name, --initials or "
+                               "--surname to set name attributes)" % old_cn)
+
+        # format given attributes
+        contact_attrs = ldb.Message()
+        contact_attrs.dn = contact_dn
+        samdb.prepare_attr_replace(contact_attrs, old_contact, "givenName", given_name)
+        samdb.prepare_attr_replace(contact_attrs, old_contact, "sn", surname)
+        samdb.prepare_attr_replace(contact_attrs, old_contact, "initials", initials)
+        samdb.prepare_attr_replace(contact_attrs, old_contact, "displayName", display_name)
+        samdb.prepare_attr_replace(contact_attrs, old_contact, "mail", mail_address)
+
+        contact_attributes_changed = len(contact_attrs) > 0
+
+        # update the contact with formatted attributes
+        samdb.transaction_start()
+        try:
+            if contact_attributes_changed:
+                samdb.modify(contact_attrs)
+            if must_change_cn:
+                samdb.rename(contact_dn, new_contact_dn)
+        except Exception as e:
+            samdb.transaction_cancel()
+            raise CommandError('Failed to rename contact "%s"' % contactname, e)
+        samdb.transaction_commit()
+
+        if must_change_cn:
+            self.outf.write('Renamed CN of contact "%s" from "%s" to "%s" '
+                            'successfully\n' % (contactname, old_cn, new_cn))
+
+        if contact_attributes_changed:
+            self.outf.write('Following attributes of contact "%s" have been '
+                            'changed successfully:\n' % (contactname))
+            for attr in contact_attrs.keys():
+                if attr == "dn":
+                    continue
+                self.outf.write('%s: %s\n' % (attr, contact_attrs[attr]
+                                if contact_attrs[attr] else '[removed]'))
+
+
+class cmd_contact(SuperCommand):
+    """Contact management."""
+
+    subcommands = {}
+    subcommands["add"] = cmd_add()
+    subcommands["create"] = cmd_add()
+    subcommands["delete"] = cmd_delete()
+    subcommands["edit"] = cmd_edit()
+    subcommands["list"] = cmd_list()
+    subcommands["move"] = cmd_move()
+    subcommands["show"] = cmd_show()
+    subcommands["rename"] = cmd_rename()
diff --git a/python/samba/netcmd/dbcheck.py b/python/samba/netcmd/dbcheck.py
new file mode 100644
index 0000000..657881b
--- /dev/null
+++ b/python/samba/netcmd/dbcheck.py
@@ -0,0 +1,193 @@
+# Samba4 AD database checker
+#
+# Copyright (C) Andrew Tridgell 2011
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program.  If not, see <http://www.gnu.org/licenses/>.
+#
+
+import optparse
+import sys
+
+import ldb
+import samba.getopt as options
+from samba import colour
+from samba.auth import system_session
+from samba.dbchecker import dbcheck
+from samba.samdb import SamDB
+
+from . import Command, CommandError, Option
+
+
+class cmd_dbcheck(Command):
+    """Check local AD database for errors."""
+    synopsis = "%prog [<DN>] [options]"
+
+    takes_optiongroups = {
+        "sambaopts": options.SambaOptions,
+        "versionopts": options.VersionOptions,
+        "credopts": options.CredentialsOptionsDouble,
+    }
+
+    def process_yes(option, opt, value, parser):
+        assert value is None
+        rargs = parser.rargs
+        if rargs:
+            arg = rargs[0]
+            if ((arg[:2] == "--" and len(arg) > 2) or
+                (arg[:1] == "-" and len(arg) > 1 and arg[1] != "-")):
+                setattr(parser.values, "yes", True)
+            else:
+                setattr(parser.values, "yes_rules", arg.split())
+                del rargs[0]
+        else:
+            setattr(parser.values, "yes", True)
+
+    takes_args = ["DN?"]
+
+    takes_options = [
+        Option("--scope", dest="scope", default="SUB",
+               help="Pass search scope that builds DN list. Options: SUB, ONE, BASE"),
+        Option("--fix", dest="fix", default=False, action='store_true',
+               help='Fix any errors found'),
+        Option("--yes", action='callback', callback=process_yes,
+               help="don't confirm changes individually. Applies all as a single transaction (will not succeed if any errors are found)"),
+        Option("--cross-ncs", dest="cross_ncs", default=False, action='store_true',
+               help="cross naming context boundaries"),
+        Option("-v", "--verbose", dest="verbose", action="store_true", default=False,
+               help="Print more details of checking"),
+        Option("-q", "--quiet", action="store_true", default=False,
+               help="don't print details of checking"),
+        Option("--attrs", dest="attrs", default=None, help="list of attributes to check (space separated)"),
+        Option("--reindex", dest="reindex", default=False, action="store_true", help="force database re-index"),
+        Option("--force-modules", dest="force_modules", default=False, action="store_true", help="force loading of Samba modules and ignore the @MODULES record (for very old databases)"),
+        Option("--reset-well-known-acls",
+               dest="reset_well_known_acls",
+               default=False, action="store_true",
+               help=("reset ACLs on objects with well known default values"
+                     " (for updating from early 4.0.x)")),
+        Option("--quick-membership-checks", dest="quick_membership_checks",
+               help=("Skips missing/orphaned memberOf backlinks checks, "
+                     "but speeds up dbcheck dramatically for domains with "
+                     "large groups"),
+               default=False, action="store_true"),
+        Option("-H", "--URL", help="LDB URL for database or target server (defaults to local SAM database)",
+               type=str, metavar="URL", dest="H"),
+        Option("--selftest-check-expired-tombstones",
+               dest="selftest_check_expired_tombstones", default=False, action="store_true",
+               help=optparse.SUPPRESS_HELP),  # This is only used by tests
+    ]
+
+    def run(self, DN=None, H=None, verbose=False, fix=False, yes=False,
+            cross_ncs=False, quiet=False,
+            scope="SUB", credopts=None, sambaopts=None, versionopts=None,
+            attrs=None, reindex=False, force_modules=False,
+            quick_membership_checks=False,
+            reset_well_known_acls=False,
+            selftest_check_expired_tombstones=False,
+            yes_rules=None):
+
+        if yes_rules is None:
+            yes_rules = []
+
+        lp = sambaopts.get_loadparm()
+
+        over_ldap = H is not None and H.startswith('ldap')
+
+        if over_ldap:
+            creds = credopts.get_credentials(lp, fallback_machine=True)
+        else:
+            creds = None
+
+        if force_modules:
+            samdb = SamDB(session_info=system_session(), url=H,
+                          credentials=creds, lp=lp, options=["modules=samba_dsdb"])
+        else:
+            try:
+                samdb = SamDB(session_info=system_session(), url=H,
+                              credentials=creds, lp=lp)
+            except Exception:
+                raise CommandError("Failed to connect to DB at %s.  If this is a really old sam.ldb (before alpha9), then try again with --force-modules" % H)
+
+        if H is None or not over_ldap:
+            samdb_schema = samdb
+        else:
+            samdb_schema = SamDB(session_info=system_session(), url=None,
+                                 credentials=creds, lp=lp)
+
+        scope_map = {"SUB": ldb.SCOPE_SUBTREE, "BASE": ldb.SCOPE_BASE, "ONE": ldb.SCOPE_ONELEVEL}
+        scope = scope.upper()
+        if scope not in scope_map:
+            raise CommandError("Unknown scope %s" % scope)
+        search_scope = scope_map[scope]
+
+        controls = ['show_deleted:1']
+        if over_ldap:
+            controls.append('paged_results:1:1000')
+        if cross_ncs:
+            controls.append("search_options:1:2")
+
+        if not attrs:
+            attrs = ['*']
+        else:
+            attrs = attrs.split()
+
+        # The dbcheck module always prints to stdout, not our self.outf
+        # (yes, maybe FIXME).
+        stdout_colour = colour.colour_if_wanted(sys.stdout,
+                                                hint=self.requested_colour)
+
+        started_transaction = False
+        if yes and fix:
+            samdb.transaction_start()
+            started_transaction = True
+        try:
+            chk = dbcheck(samdb, samdb_schema=samdb_schema, verbose=verbose,
+                          fix=fix, yes=yes, quiet=quiet,
+                          in_transaction=started_transaction,
+                          quick_membership_checks=quick_membership_checks,
+                          reset_well_known_acls=reset_well_known_acls,
+                          check_expired_tombstones=selftest_check_expired_tombstones,
+                          colour=stdout_colour)
+
+            for option in yes_rules:
+                if hasattr(chk, option):
+                    setattr(chk, option, 'ALL')
+                else:
+                    raise CommandError("Invalid fix rule %s" % option)
+
+            if reindex:
+                self.outf.write("Re-indexing...\n")
+                error_count = 0
+                if chk.reindex_database():
+                    self.outf.write("completed re-index OK\n")
+
+            elif force_modules:
+                self.outf.write("Resetting @MODULES...\n")
+                error_count = 0
+                if chk.reset_modules():
+                    self.outf.write("completed @MODULES reset OK\n")
+
+            else:
+                error_count = chk.check_database(DN=DN, scope=search_scope,
+                                                 controls=controls, attrs=attrs)
+        except:
+            if started_transaction:
+                samdb.transaction_cancel()
+            raise
+
+        if started_transaction:
+            samdb.transaction_commit()
+
+        if error_count != 0:
+            sys.exit(1)
diff --git a/python/samba/netcmd/delegation.py b/python/samba/netcmd/delegation.py
new file mode 100644
index 0000000..840be20
--- /dev/null
+++ b/python/samba/netcmd/delegation.py
@@ -0,0 +1,689 @@
+# delegation management
+#
+# Copyright Matthieu Patou <mat@samba.org> 2010
+# Copyright Stefan Metzmacher <metze@samba.org> 2011
+# Copyright Bjoern Baumbach <bb@sernet.de> 2011
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program.  If not, see <http://www.gnu.org/licenses/>.
+#
+
+import samba.getopt as options
+import ldb
+from samba import provision
+from samba import dsdb
+from samba.samdb import SamDB
+from samba.auth import system_session
+from samba.dcerpc import security
+from samba.ndr import ndr_pack, ndr_unpack
+from samba.netcmd.common import _get_user_realm_domain
+from samba.netcmd import (
+    Command,
+    CommandError,
+    SuperCommand,
+    Option
+)
+
+
+class cmd_delegation_show(Command):
+    """Show the delegation setting of an account."""
+
+    synopsis = "%prog <accountname> [options]"
+
+    takes_optiongroups = {
+        "sambaopts": options.SambaOptions,
+        "credopts": options.CredentialsOptions,
+        "versionopts": options.VersionOptions,
+    }
+
+    takes_options = [
+        Option("-H", "--URL", help="LDB URL for database or target server", type=str,
+               metavar="URL", dest="H"),
+    ]
+
+    takes_args = ["accountname"]
+
+    def show_security_descriptor(self, sam, security_descriptor):
+        dacl = security_descriptor.dacl
+        desc_type = security_descriptor.type
+
+        warning_info = ('Security Descriptor of attribute '
+                        'msDS-AllowedToActOnBehalfOfOtherIdentity')
+
+        if dacl is None or not desc_type & security.SEC_DESC_DACL_PRESENT:
+            self.errf.write(f'Warning: DACL not present in {warning_info}!\n')
+            return
+
+        if not desc_type & security.SEC_DESC_SELF_RELATIVE:
+            self.errf.write(f'Warning: DACL in {warning_info} lacks '
+                            f'SELF_RELATIVE flag!\n')
+            return
+
+        first = True
+
+        for ace in dacl.aces:
+            trustee = ace.trustee
+
+            # Convert the trustee SID into a DN if we can.
+            try:
+                res = sam.search(f'<SID={trustee}>',
+                                 scope=ldb.SCOPE_BASE)
+            except ldb.LdbError as err:
+                num, _ = err.args
+                if num != ldb.ERR_NO_SUCH_OBJECT:
+                    raise
+            else:
+                if len(res) == 1:
+                    trustee = res[0].dn
+
+            ignore = False
+
+            if (ace.type == security.SEC_ACE_TYPE_ACCESS_DENIED
+                    or ace.type == security.SEC_ACE_TYPE_ACCESS_DENIED_OBJECT):
+                self.errf.write(f'Warning: ACE in {warning_info} denies '
+                                f'access for trustee {trustee}!\n')
+                # Ignore the ACE if it denies access
+                ignore = True
+            elif (ace.type != security.SEC_ACE_TYPE_ACCESS_ALLOWED
+                    and ace.type != security.SEC_ACE_TYPE_ACCESS_ALLOWED_OBJECT):
+                # Ignore the ACE if it doesn't explicitly allow access
+                ignore = True
+
+            inherit_only = ace.flags & security.SEC_ACE_FLAG_INHERIT_ONLY
+            object_inherit = ace.flags & security.SEC_ACE_FLAG_OBJECT_INHERIT
+            container_inherit = (
+                ace.flags & security.SEC_ACE_FLAG_CONTAINER_INHERIT)
+            inherited_ace = ace.flags & security.SEC_ACE_FLAG_INHERITED_ACE
+
+            if inherit_only and not object_inherit and not container_inherit:
+                # Ignore the ACE if it is propagated only to child objects, but
+                # neither of the object and container inherit flags are set.
+                ignore = True
+            else:
+                if container_inherit:
+                    self.errf.write(f'Warning: ACE for trustee {trustee} has '
+                                    f'unexpected CONTAINER_INHERIT flag set in '
+                                    f'{warning_info}!\n')
+                    ignore = True
+
+                if inherited_ace:
+                    self.errf.write(f'Warning: ACE for trustee {trustee} has '
+                                    f'unexpected INHERITED_ACE flag set in '
+                                    f'{warning_info}!\n')
+                    ignore = True
+
+            if not ace.access_mask:
+                # Ignore the ACE if it doesn't grant any permissions.
+                ignore = True
+
+            if not ignore:
+                if first:
+                    self.outf.write('  Principals that may delegate to this '
+                                    'account:\n')
+                    first = False
+
+                self.outf.write(f'msDS-AllowedToActOnBehalfOfOtherIdentity: '
+                                f'{trustee}\n')
+
+
+    def run(self, accountname, H=None, credopts=None, sambaopts=None, versionopts=None):
+        lp = sambaopts.get_loadparm()
+        creds = credopts.get_credentials(lp)
+        paths = provision.provision_paths_from_lp(lp, lp.get("realm"))
+
+        if H is None:
+            path = paths.samdb
+        else:
+            path = H
+
+        sam = SamDB(path, session_info=system_session(),
+                    credentials=creds, lp=lp)
+        # TODO once I understand how, use the domain info to naildown
+        # to the correct domain
+        (cleanedaccount, realm, domain) = _get_user_realm_domain(accountname,
+                                                                 sam)
+
+        res = sam.search(expression="sAMAccountName=%s" %
+                         ldb.binary_encode(cleanedaccount),
+                         scope=ldb.SCOPE_SUBTREE,
+                         attrs=["userAccountControl", "msDS-AllowedToDelegateTo",
+                                "msDS-AllowedToActOnBehalfOfOtherIdentity"])
+        if len(res) == 0:
+            raise CommandError("Unable to find account name '%s'" % accountname)
+        assert(len(res) == 1)
+
+        uac = int(res[0].get("userAccountControl")[0])
+        allowed = res[0].get("msDS-AllowedToDelegateTo")
+        allowed_from = res[0].get("msDS-AllowedToActOnBehalfOfOtherIdentity", idx=0)
+
+        self.outf.write("Account-DN: %s\n" % str(res[0].dn))
+        self.outf.write("UF_TRUSTED_FOR_DELEGATION: %s\n"
+                        % bool(uac & dsdb.UF_TRUSTED_FOR_DELEGATION))
+        self.outf.write("UF_TRUSTED_TO_AUTHENTICATE_FOR_DELEGATION: %s\n" %
+                        bool(uac & dsdb.UF_TRUSTED_TO_AUTHENTICATE_FOR_DELEGATION))
+
+        if allowed:
+            self.outf.write("  Services this account may delegate to:\n")
+            for a in allowed:
+                self.outf.write("msDS-AllowedToDelegateTo: %s\n" % a)
+        if allowed_from is not None:
+            try:
+                security_descriptor = ndr_unpack(security.descriptor, allowed_from)
+            except RuntimeError:
+                self.errf.write("Warning: Security Descriptor of attribute "
+                                "msDS-AllowedToActOnBehalfOfOtherIdentity "
+                                "could not be unmarshalled!\n")
+            else:
+                self.show_security_descriptor(sam, security_descriptor)
+
+
+class cmd_delegation_for_any_service(Command):
+    """Set/unset UF_TRUSTED_FOR_DELEGATION for an account."""
+
+    synopsis = "%prog <accountname> [(on|off)] [options]"
+
+    takes_optiongroups = {
+        "sambaopts": options.SambaOptions,
+        "credopts": options.CredentialsOptions,
+        "versionopts": options.VersionOptions,
+    }
+
+    takes_options = [
+        Option("-H", "--URL", help="LDB URL for database or target server", type=str,
+               metavar="URL", dest="H"),
+    ]
+
+    takes_args = ["accountname", "onoff"]
+
+    def run(self, accountname, onoff, H=None, credopts=None, sambaopts=None,
+            versionopts=None):
+
+        on = False
+        if onoff == "on":
+            on = True
+        elif onoff == "off":
+            on = False
+        else:
+            raise CommandError("invalid argument: '%s' (choose from 'on', 'off')" % onoff)
+
+        lp = sambaopts.get_loadparm()
+        creds = credopts.get_credentials(lp)
+        paths = provision.provision_paths_from_lp(lp, lp.get("realm"))
+        if H is None:
+            path = paths.samdb
+        else:
+            path = H
+
+        sam = SamDB(path, session_info=system_session(),
+                    credentials=creds, lp=lp)
+        # TODO once I understand how, use the domain info to naildown
+        # to the correct domain
+        (cleanedaccount, realm, domain) = _get_user_realm_domain(accountname,
+                                                                 sam)
+
+        search_filter = "sAMAccountName=%s" % ldb.binary_encode(cleanedaccount)
+        flag = dsdb.UF_TRUSTED_FOR_DELEGATION
+        try:
+            sam.toggle_userAccountFlags(search_filter, flag,
+                                        flags_str="Trusted-for-Delegation",
+                                        on=on, strict=True)
+        except Exception as err:
+            raise CommandError(err)
+
+
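+# A usage sketch for the command above (hypothetical account name):
+#
+#     samba-tool delegation for-any-service WEB01$ on
+#
+# flips the UF_TRUSTED_FOR_DELEGATION bit on the matching account by
+# calling SamDB.toggle_userAccountFlags() with the search filter built
+# from the cleaned sAMAccountName, as implemented in run() above.
+
+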
+class cmd_delegation_for_any_protocol(Command):
+    """Set/unset UF_TRUSTED_TO_AUTHENTICATE_FOR_DELEGATION (S4U2Proxy) for an account."""
+
+    synopsis = "%prog <accountname> [(on|off)] [options]"
+
+    takes_optiongroups = {
+        "sambaopts": options.SambaOptions,
+        "credopts": options.CredentialsOptions,
+        "versionopts": options.VersionOptions,
+    }
+
+    takes_options = [
+        Option("-H", "--URL", help="LDB URL for database or target server", type=str,
+               metavar="URL", dest="H"),
+    ]
+
+    takes_args = ["accountname", "onoff"]
+
+    def run(self, accountname, onoff, H=None, credopts=None, sambaopts=None,
+            versionopts=None):
+
+        on = False
+        if onoff == "on":
+            on = True
+        elif onoff == "off":
+            on = False
+        else:
+            raise CommandError("invalid argument: '%s' (choose from 'on', 'off')" % onoff)
+
+        lp = sambaopts.get_loadparm()
+        creds = credopts.get_credentials(lp, fallback_machine=True)
+        paths = provision.provision_paths_from_lp(lp, lp.get("realm"))
+        if H is None:
+            path = paths.samdb
+        else:
+            path = H
+
+        sam = SamDB(path, session_info=system_session(),
+                    credentials=creds, lp=lp)
+        # TODO once I understand how, use the domain info to naildown
+        # to the correct domain
+        (cleanedaccount, realm, domain) = _get_user_realm_domain(accountname,
+                                                                 sam)
+
+        search_filter = "sAMAccountName=%s" % ldb.binary_encode(cleanedaccount)
+        flag = dsdb.UF_TRUSTED_TO_AUTHENTICATE_FOR_DELEGATION
+        try:
+            sam.toggle_userAccountFlags(search_filter, flag,
+                                        flags_str="Trusted-to-Authenticate-for-Delegation",
+                                        on=on, strict=True)
+        except Exception as err:
+            raise CommandError(err)
+
+
+class cmd_delegation_add_service(Command):
+    """Add a service principal to msDS-AllowedToDelegateTo so that an account may delegate to it."""
+
+    synopsis = "%prog <accountname> <principal> [options]"
+
+    takes_optiongroups = {
+        "sambaopts": options.SambaOptions,
+        "credopts": options.CredentialsOptions,
+        "versionopts": options.VersionOptions,
+    }
+
+    takes_options = [
+        Option("-H", "--URL", help="LDB URL for database or target server", type=str,
+               metavar="URL", dest="H"),
+    ]
+
+    takes_args = ["accountname", "principal"]
+
+    def run(self, accountname, principal, H=None, credopts=None, sambaopts=None,
+            versionopts=None):
+
+        lp = sambaopts.get_loadparm()
+        creds = credopts.get_credentials(lp)
+        paths = provision.provision_paths_from_lp(lp, lp.get("realm"))
+        if H is None:
+            path = paths.samdb
+        else:
+            path = H
+
+        sam = SamDB(path, session_info=system_session(),
+                    credentials=creds, lp=lp)
+        # TODO once I understand how, use the domain info to naildown
+        # to the correct domain
+        (cleanedaccount, realm, domain) = _get_user_realm_domain(accountname,
+                                                                 sam)
+
+        res = sam.search(expression="sAMAccountName=%s" %
+                         ldb.binary_encode(cleanedaccount),
+                         scope=ldb.SCOPE_SUBTREE,
+                         attrs=["msDS-AllowedToDelegateTo"])
+        if len(res) == 0:
+            raise CommandError("Unable to find account name '%s'" % accountname)
+        assert(len(res) == 1)
+
+        msg = ldb.Message()
+        msg.dn = res[0].dn
+        msg["msDS-AllowedToDelegateTo"] = ldb.MessageElement([principal],
+                                                             ldb.FLAG_MOD_ADD,
+                                                             "msDS-AllowedToDelegateTo")
+        try:
+            sam.modify(msg)
+        except Exception as err:
+            raise CommandError(err)
+
+
+class cmd_delegation_del_service(Command):
+    """Delete a service principal from msDS-AllowedToDelegateTo so that an account may no longer delegate to it."""
+
+    synopsis = "%prog <accountname> <principal> [options]"
+
+    takes_optiongroups = {
+        "sambaopts": options.SambaOptions,
+        "credopts": options.CredentialsOptions,
+        "versionopts": options.VersionOptions,
+    }
+
+    takes_options = [
+        Option("-H", "--URL", help="LDB URL for database or target server", type=str,
target server", type=str, + metavar="URL", dest="H"), + ] + + takes_args = ["accountname", "principal"] + + def run(self, accountname, principal, H=None, credopts=None, sambaopts=None, + versionopts=None): + + lp = sambaopts.get_loadparm() + creds = credopts.get_credentials(lp) + paths = provision.provision_paths_from_lp(lp, lp.get("realm")) + if H is None: + path = paths.samdb + else: + path = H + + sam = SamDB(path, session_info=system_session(), + credentials=creds, lp=lp) + # TODO once I understand how, use the domain info to naildown + # to the correct domain + (cleanedaccount, realm, domain) = _get_user_realm_domain(accountname, + sam) + + res = sam.search(expression="sAMAccountName=%s" % + ldb.binary_encode(cleanedaccount), + scope=ldb.SCOPE_SUBTREE, + attrs=["msDS-AllowedToDelegateTo"]) + if len(res) == 0: + raise CommandError("Unable to find account name '%s'" % accountname) + assert(len(res) == 1) + + msg = ldb.Message() + msg.dn = res[0].dn + msg["msDS-AllowedToDelegateTo"] = ldb.MessageElement([principal], + ldb.FLAG_MOD_DELETE, + "msDS-AllowedToDelegateTo") + try: + sam.modify(msg) + except Exception as err: + raise CommandError(err) + + +class cmd_delegation_add_principal(Command): + """Add a principal to msDS-AllowedToActOnBehalfOfOtherIdentity that may delegate to an account.""" + + synopsis = "%prog [options]" + + takes_optiongroups = { + "sambaopts": options.SambaOptions, + "credopts": options.CredentialsOptions, + "versionopts": options.VersionOptions, + } + + takes_options = [ + Option("-H", "--URL", help="LDB URL for database or target server", + type=str, metavar="URL", dest="H"), + ] + + takes_args = ["accountname", "principal"] + + def run(self, accountname, principal, H=None, credopts=None, sambaopts=None, + versionopts=None): + + lp = sambaopts.get_loadparm() + creds = credopts.get_credentials(lp) + paths = provision.provision_paths_from_lp(lp, lp.get("realm")) + if H is None: + path = paths.samdb + else: + path = H + + sam = SamDB(path, session_info=system_session(), + credentials=creds, lp=lp) + # TODO once I understand how, use the domain info to naildown + # to the correct domain + cleanedaccount, _, _ = _get_user_realm_domain(accountname, sam) + + account_res = sam.search( + expression="sAMAccountName=%s" % + ldb.binary_encode(cleanedaccount), + scope=ldb.SCOPE_SUBTREE, + attrs=["msDS-AllowedToActOnBehalfOfOtherIdentity"]) + if len(account_res) == 0: + raise CommandError(f"Unable to find account name '{accountname}'") + assert(len(account_res) == 1) + + data = account_res[0].get( + "msDS-AllowedToActOnBehalfOfOtherIdentity", idx=0) + if data is None: + # Create the security descriptor if it is not present. + owner_sid = security.dom_sid(security.SID_BUILTIN_ADMINISTRATORS) + + security_desc = security.descriptor() + security_desc.revision = security.SD_REVISION + security_desc.type = (security.SEC_DESC_DACL_PRESENT | + security.SEC_DESC_SELF_RELATIVE) + security_desc.owner_sid = owner_sid + + dacl = None + else: + try: + security_desc = ndr_unpack(security.descriptor, data) + except RuntimeError: + raise CommandError(f"Security Descriptor of attribute " + f"msDS-AllowedToActOnBehalfOfOtherIdentity " + f"for account '{accountname}' could not be " + f"unmarshalled!") + + dacl = security_desc.dacl + + if dacl is None: + # Create the DACL if it is not present. 
+            dacl = security.acl()
+            dacl.revision = security.SECURITY_ACL_REVISION_ADS
+            dacl.num_aces = 0
+
+        # TODO once I understand how, use the domain info to naildown
+        # to the correct domain
+        cleanedprinc, _, _ = _get_user_realm_domain(principal, sam)
+
+        princ_res = sam.search(expression="sAMAccountName=%s" %
+                               ldb.binary_encode(cleanedprinc),
+                               scope=ldb.SCOPE_SUBTREE,
+                               attrs=["objectSid"])
+        if len(princ_res) == 0:
+            raise CommandError(f"Unable to find principal name '{principal}'")
+        assert(len(princ_res) == 1)
+
+        princ_sid = security.dom_sid(
+            sam.schema_format_value(
+                "objectSID",
+                princ_res[0].get("objectSID", idx=0)).decode("utf-8"))
+
+        aces = dacl.aces
+
+        # Check that there is no existing ACE for this principal.
+        if any(ace.trustee == princ_sid for ace in aces):
+            raise CommandError(
+                f"ACE for principal '{principal}' already present in Security "
+                f"Descriptor of attribute "
+                f"msDS-AllowedToActOnBehalfOfOtherIdentity for account "
+                f"'{accountname}'.")
+
+        # Create the new ACE.
+        ace = security.ace()
+        ace.type = security.SEC_ACE_TYPE_ACCESS_ALLOWED
+        ace.flags = 0
+        ace.access_mask = security.SEC_ADS_GENERIC_ALL
+        ace.trustee = princ_sid
+
+        aces.append(ace)
+
+        dacl.aces = aces
+        dacl.num_aces += 1
+
+        security_desc.dacl = dacl
+
+        new_data = ndr_pack(security_desc)
+
+        # Set the new security descriptor. First, delete the original value to
+        # detect a race condition if someone else updates the attribute at the
+        # same time.
+        msg = ldb.Message()
+        msg.dn = account_res[0].dn
+        if data is not None:
+            msg["0"] = ldb.MessageElement(
+                data, ldb.FLAG_MOD_DELETE,
+                "msDS-AllowedToActOnBehalfOfOtherIdentity")
+        msg["1"] = ldb.MessageElement(
+            new_data, ldb.FLAG_MOD_ADD,
+            "msDS-AllowedToActOnBehalfOfOtherIdentity")
+        try:
+            sam.modify(msg)
+        except ldb.LdbError as err:
+            num, _ = err.args
+            if num == ldb.ERR_NO_SUCH_ATTRIBUTE:
+                raise CommandError(
+                    f"Refused to update attribute "
+                    f"msDS-AllowedToActOnBehalfOfOtherIdentity for account "
+                    f"'{accountname}': a conflicting attribute update "
+                    f"occurred simultaneously.")
+            else:
+                raise CommandError(err)
+
+
+class cmd_delegation_del_principal(Command):
+    """Delete a principal from msDS-AllowedToActOnBehalfOfOtherIdentity that may no longer delegate to an account."""
+
+    synopsis = "%prog <accountname> <principal> [options]"
+
+    takes_optiongroups = {
+        "sambaopts": options.SambaOptions,
+        "credopts": options.CredentialsOptions,
+        "versionopts": options.VersionOptions,
+    }
+
+    takes_options = [
+        Option("-H", "--URL", help="LDB URL for database or target server",
+               type=str, metavar="URL", dest="H"),
+    ]
+
+    takes_args = ["accountname", "principal"]
+
+    def run(self, accountname, principal, H=None, credopts=None, sambaopts=None,
+            versionopts=None):
+
+        lp = sambaopts.get_loadparm()
+        creds = credopts.get_credentials(lp)
+        paths = provision.provision_paths_from_lp(lp, lp.get("realm"))
+        if H is None:
+            path = paths.samdb
+        else:
+            path = H
+
+        sam = SamDB(path, session_info=system_session(),
+                    credentials=creds, lp=lp)
+        # TODO once I understand how, use the domain info to naildown
+        # to the correct domain
+        cleanedaccount, _, _ = _get_user_realm_domain(accountname, sam)
+
+        account_res = sam.search(
+            expression="sAMAccountName=%s" %
+            ldb.binary_encode(cleanedaccount),
+            scope=ldb.SCOPE_SUBTREE,
+            attrs=["msDS-AllowedToActOnBehalfOfOtherIdentity"])
+        if len(account_res) == 0:
+            raise CommandError("Unable to find account name '%s'" % accountname)
+        assert(len(account_res) == 1)
+
+        data = account_res[0].get(
+            "msDS-AllowedToActOnBehalfOfOtherIdentity", idx=0)
"msDS-AllowedToActOnBehalfOfOtherIdentity", idx=0) + if data is None: + raise CommandError(f"Attribute " + f"msDS-AllowedToActOnBehalfOfOtherIdentity for " + f"account '{accountname}' not present!") + + try: + security_desc = ndr_unpack(security.descriptor, data) + except RuntimeError: + raise CommandError(f"Security Descriptor of attribute " + f"msDS-AllowedToActOnBehalfOfOtherIdentity for " + f"account '{accountname}' could not be " + f"unmarshalled!") + + dacl = security_desc.dacl + if dacl is None: + raise CommandError(f"DACL not present on Security Descriptor of " + f"attribute " + f"msDS-AllowedToActOnBehalfOfOtherIdentity for " + f"account '{accountname}'!") + + # TODO once I understand how, use the domain info to naildown + # to the correct domain + cleanedprinc, _, _ = _get_user_realm_domain(principal, sam) + + princ_res = sam.search(expression="sAMAccountName=%s" % + ldb.binary_encode(cleanedprinc), + scope=ldb.SCOPE_SUBTREE, + attrs=["objectSid"]) + if len(princ_res) == 0: + raise CommandError(f"Unable to find principal name '{principal}'") + assert(len(princ_res) == 1) + + princ_sid = security.dom_sid( + sam.schema_format_value( + "objectSID", + princ_res[0].get("objectSID", idx=0)).decode("utf-8")) + + old_aces = dacl.aces + + # Remove any ACEs relating to the specified principal. + aces = [ace for ace in old_aces if ace.trustee != princ_sid] + + # Raise an error if we didn't find any. + if len(aces) == len(old_aces): + raise CommandError(f"Unable to find ACE for principal " + f"'{principal}' in Security Descriptor of " + f"attribute " + f"msDS-AllowedToActOnBehalfOfOtherIdentity for " + f"account '{accountname}'.") + + dacl.num_aces = len(aces) + dacl.aces = aces + + security_desc.dacl = dacl + + new_data = ndr_pack(security_desc) + + # Set the new security descriptor. First, delete the original value to + # detect a race condition if someone else updates the attribute at the + # same time. 
+        msg = ldb.Message()
+        msg.dn = account_res[0].dn
+        msg["0"] = ldb.MessageElement(
+            data, ldb.FLAG_MOD_DELETE,
+            "msDS-AllowedToActOnBehalfOfOtherIdentity")
+        msg["1"] = ldb.MessageElement(
+            new_data, ldb.FLAG_MOD_ADD,
+            "msDS-AllowedToActOnBehalfOfOtherIdentity")
+        try:
+            sam.modify(msg)
+        except ldb.LdbError as err:
+            num, _ = err.args
+            if num == ldb.ERR_NO_SUCH_ATTRIBUTE:
+                raise CommandError(
+                    f"Refused to update attribute "
+                    f"msDS-AllowedToActOnBehalfOfOtherIdentity for account "
+                    f"'{accountname}': a conflicting attribute update "
+                    f"occurred simultaneously.")
+            else:
+                raise CommandError(err)
+
+
+class cmd_delegation(SuperCommand):
+    """Delegation management."""
+
+    subcommands = {}
+    subcommands["show"] = cmd_delegation_show()
+    subcommands["for-any-service"] = cmd_delegation_for_any_service()
+    subcommands["for-any-protocol"] = cmd_delegation_for_any_protocol()
+    subcommands["add-service"] = cmd_delegation_add_service()
+    subcommands["del-service"] = cmd_delegation_del_service()
+    subcommands["add-principal"] = cmd_delegation_add_principal()
+    subcommands["del-principal"] = cmd_delegation_del_principal()
diff --git a/python/samba/netcmd/dns.py b/python/samba/netcmd/dns.py
new file mode 100644
index 0000000..693fc9a
--- /dev/null
+++ b/python/samba/netcmd/dns.py
@@ -0,0 +1,1394 @@
+# DNS management tool
+#
+# Copyright (C) Amitay Isaacs 2011-2012
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program.  If not, see <http://www.gnu.org/licenses/>.
+#
+import samba.getopt as options
+from samba import WERRORError
+from samba import werror
+from struct import pack
+from socket import inet_ntop, inet_pton
+from socket import AF_INET
+from socket import AF_INET6
+import struct
+import time
+import ldb
+from samba.ndr import ndr_unpack, ndr_pack
+import re
+
+from samba import remove_dc, dsdb_dns
+from samba.samdb import SamDB
+from samba.auth import system_session
+
+from samba.netcmd import (
+    Command,
+    CommandError,
+    Option,
+    SuperCommand,
+)
+from samba.dcerpc import dnsp, dnsserver
+
+from samba.dnsserver import record_from_string, DNSParseError, flag_from_string
+from samba.dnsserver import dns_record_match
+
+
+def dns_connect(server, lp, creds):
+    if server.lower() == 'localhost':
+        server = '127.0.0.1'
+    binding_str = "ncacn_ip_tcp:%s[sign]" % server
+    try:
+        dns_conn = dnsserver.dnsserver(binding_str, lp, creds)
+    except RuntimeError as e:
+        raise CommandError('Connecting to DNS RPC server %s failed with %s' % (server, e))
+
+    return dns_conn
+
+
+class DnsConnWrapper:
+    """A wrapper around a dnsserver.dnsserver connection that makes it
+    harder not to report friendly messages.
+
+    If, rather than
+
+        dns_conn = dns_connect(server, lp, creds)
+
+    you use
+
+        dns_conn = DnsConnWrapper(server, lp, creds)
+
+    then various common errors (for example, misspelled zones) on
+    common operations will raise CommandErrors that turn into
+    relatively nice messages (when compared to tracebacks).
+
+    In addition, if you provide a messages keyword argument, it will
+    override the defaults. Note that providing None will turn off the
+    default, letting the original exception shine through.
+
+    messages = {
+        werror.WERR_DNS_ERROR_ZONE_DOES_NOT_EXIST: (
+            f'Zone {zone} does not exist and so could not be deleted.'),
+        werror.WERR_DNS_ERROR_NAME_DOES_NOT_EXIST: None
+    }
+    res = dns_conn.DnssrvOperation2(  # ...
+        messages=messages)
+
+    This example changes the message for ZONE_DOES_NOT_EXIST and
+    avoids catching NAME_DOES_NOT_EXIST.
+
+    Only WERRORErrors are intercepted.
+    """
+
+    default_messages = {
+        werror.WERR_DNS_ERROR_DS_UNAVAILABLE: "Could not contact RPC server",
+        werror.WERR_DNS_ERROR_ZONE_ALREADY_EXISTS: 'Zone already exists',
+        werror.WERR_DNS_ERROR_RECORD_DOES_NOT_EXIST: 'The record does not exist',
+        werror.WERR_DNS_ERROR_NAME_DOES_NOT_EXIST: 'The zone does not exist',
+        werror.WERR_ACCESS_DENIED: 'Insufficient permissions',
+    }
+
+    def __init__(self, server, lp, creds):
+        self.dns_conn = dns_connect(server, lp, creds)
+
+    def __getattr__(self, name):
+        attr = getattr(self.dns_conn, name)
+        if name not in {
+                "DnssrvComplexOperation2",
+                "DnssrvEnumRecords2",
+                "DnssrvOperation2",
+                "DnssrvQuery2",
+                "DnssrvUpdateRecord2"}:
+            return attr
+
+        def f(*args, messages=None):
+            if messages is None:
+                messages = {}
+
+            try:
+                return attr(*args)
+            except WERRORError as e:
+                werr, errstr = e.args
+                if werr in messages:
+                    if messages[werr] is None:
+                        # None overrides a default message, leaving the bare exception
+                        raise
+                    raise CommandError(f"{messages[werr]} [{errstr}]", e)
+                if werr in self.default_messages:
+                    raise CommandError(f"{self.default_messages[werr]} [{errstr}]", e)
+                raise
+
+        return f
+
+
+def bool_string(flag):
+    if flag == 0:
+        ret = 'FALSE'
+    elif flag == 1:
+        ret = 'TRUE'
+    else:
+        ret = 'UNKNOWN (0x%x)' % flag
+    return ret
+
+
+def enum_string(module, enum_defs, value):
+    ret = None
+    for e in enum_defs:
+        if value == getattr(module, e):
+            ret = e
+            break
+    if not ret:
+        ret = 'UNKNOWN (0x%x)' % value
+    return ret
+
+
+def bitmap_string(module, bitmap_defs, value):
+    ret = ''
+    for b in bitmap_defs:
+        if value & getattr(module, b):
+            ret += '%s ' % b
+    if not ret:
+        ret = 'NONE'
+    return ret
+
+
+def boot_method_string(boot_method):
+    enum_defs = ['DNS_BOOT_METHOD_UNINITIALIZED', 'DNS_BOOT_METHOD_FILE',
+                 'DNS_BOOT_METHOD_REGISTRY', 'DNS_BOOT_METHOD_DIRECTORY']
+    return enum_string(dnsserver, enum_defs, boot_method)
+
+
+def name_check_flag_string(check_flag):
+    enum_defs = ['DNS_ALLOW_RFC_NAMES_ONLY', 'DNS_ALLOW_NONRFC_NAMES',
+                 'DNS_ALLOW_MULTIBYTE_NAMES', 'DNS_ALLOW_ALL_NAMES']
+    return enum_string(dnsserver, enum_defs, check_flag)
+
+
+def zone_type_string(zone_type):
+    enum_defs = ['DNS_ZONE_TYPE_CACHE', 'DNS_ZONE_TYPE_PRIMARY',
+                 'DNS_ZONE_TYPE_SECONDARY', 'DNS_ZONE_TYPE_STUB',
+                 'DNS_ZONE_TYPE_FORWARDER', 'DNS_ZONE_TYPE_SECONDARY_CACHE']
+    return enum_string(dnsp, enum_defs, zone_type)
+
+
+def zone_update_string(zone_update):
+    enum_defs = ['DNS_ZONE_UPDATE_OFF', 'DNS_ZONE_UPDATE_UNSECURE',
+                 'DNS_ZONE_UPDATE_SECURE']
+    return enum_string(dnsp, enum_defs, zone_update)
+
+
+def zone_secondary_security_string(security):
+    enum_defs = ['DNS_ZONE_SECSECURE_NO_SECURITY', 'DNS_ZONE_SECSECURE_NS_ONLY',
+                 'DNS_ZONE_SECSECURE_LIST_ONLY', 'DNS_ZONE_SECSECURE_NO_XFER']
+    return enum_string(dnsserver, enum_defs, security)
+
+
+def zone_notify_level_string(notify_level):
+    enum_defs = ['DNS_ZONE_NOTIFY_OFF', 'DNS_ZONE_NOTIFY_ALL_SECONDARIES',
+                 'DNS_ZONE_NOTIFY_LIST_ONLY']
+    return enum_string(dnsserver, enum_defs, notify_level)
+    return enum_string(dnsserver, enum_defs, notify_level)
+
+
+def dp_flags_string(dp_flags):
+    bitmap_defs = ['DNS_DP_AUTOCREATED', 'DNS_DP_LEGACY', 'DNS_DP_DOMAIN_DEFAULT',
+                   'DNS_DP_FOREST_DEFAULT', 'DNS_DP_ENLISTED', 'DNS_DP_DELETED']
+    return bitmap_string(dnsserver, bitmap_defs, dp_flags)
+
+
+def zone_flags_string(flags):
+    bitmap_defs = ['DNS_RPC_ZONE_PAUSED', 'DNS_RPC_ZONE_SHUTDOWN',
+                   'DNS_RPC_ZONE_REVERSE', 'DNS_RPC_ZONE_AUTOCREATED',
+                   'DNS_RPC_ZONE_DSINTEGRATED', 'DNS_RPC_ZONE_AGING',
+                   'DNS_RPC_ZONE_UPDATE_UNSECURE', 'DNS_RPC_ZONE_UPDATE_SECURE',
+                   'DNS_RPC_ZONE_READONLY']
+    return bitmap_string(dnsserver, bitmap_defs, flags)
+
+
+def ip4_array_string(array):
+    ret = []
+    if not array:
+        return ret
+    for i in range(array.AddrCount):
+        addr = inet_ntop(AF_INET, pack('I', array.AddrArray[i]))
+        ret.append(addr)
+    return ret
+
+
+def dns_addr_array_string(array):
+    ret = []
+    if not array:
+        return ret
+    for i in range(array.AddrCount):
+        if array.AddrArray[i].MaxSa[0] == 0x02:
+            # 0x02 is the Windows AF_INET family; the IPv4 address
+            # occupies bytes 4-7 of the sockaddr buffer.
+            x = struct.pack('4B', *array.AddrArray[i].MaxSa[4:8])
+            addr = inet_ntop(AF_INET, x)
+        elif array.AddrArray[i].MaxSa[0] == 0x17:
+            # 0x17 (23) is the Windows AF_INET6 family; the IPv6
+            # address occupies bytes 8-23 of the sockaddr buffer.
+            x = struct.pack('16B', *array.AddrArray[i].MaxSa[8:24])
+            addr = inet_ntop(AF_INET6, x)
+        else:
+            addr = 'UNKNOWN'
+        ret.append(addr)
+    return ret
+
+
+def dns_type_flag(rec_type):
+    try:
+        return flag_from_string(rec_type)
+    except DNSParseError as e:
+        raise CommandError(*e.args)
+
+
+def dns_client_version(cli_version):
+    version = cli_version.upper()
+    if version == 'W2K':
+        client_version = dnsserver.DNS_CLIENT_VERSION_W2K
+    elif version == 'DOTNET':
+        client_version = dnsserver.DNS_CLIENT_VERSION_DOTNET
+    elif version == 'LONGHORN':
+        client_version = dnsserver.DNS_CLIENT_VERSION_LONGHORN
+    else:
+        raise CommandError('Unknown client version %s' % cli_version)
+    return client_version
+
+
+def print_serverinfo(outf, typeid, serverinfo):
+    outf.write(' dwVersion : 0x%x\n' % serverinfo.dwVersion)
+    outf.write(' fBootMethod : %s\n' % boot_method_string(serverinfo.fBootMethod))
+    outf.write(' fAdminConfigured : %s\n' % bool_string(serverinfo.fAdminConfigured))
+    outf.write(' fAllowUpdate : %s\n' % bool_string(serverinfo.fAllowUpdate))
+    outf.write(' fDsAvailable : %s\n' % bool_string(serverinfo.fDsAvailable))
+    outf.write(' pszServerName : %s\n' % serverinfo.pszServerName)
+    outf.write(' pszDsContainer : %s\n' % serverinfo.pszDsContainer)
+
+    if typeid != dnsserver.DNSSRV_TYPEID_SERVER_INFO:
+        outf.write(' aipServerAddrs : %s\n' %
+                   ip4_array_string(serverinfo.aipServerAddrs))
+        outf.write(' aipListenAddrs : %s\n' %
+                   ip4_array_string(serverinfo.aipListenAddrs))
+        outf.write(' aipForwarders : %s\n' %
+                   ip4_array_string(serverinfo.aipForwarders))
+    else:
+        outf.write(' aipServerAddrs : %s\n' %
+                   dns_addr_array_string(serverinfo.aipServerAddrs))
+        outf.write(' aipListenAddrs : %s\n' %
+                   dns_addr_array_string(serverinfo.aipListenAddrs))
+        outf.write(' aipForwarders : %s\n' %
+                   dns_addr_array_string(serverinfo.aipForwarders))
+
+    outf.write(' dwLogLevel : %d\n' % serverinfo.dwLogLevel)
+    outf.write(' dwDebugLevel : %d\n' % serverinfo.dwDebugLevel)
+    outf.write(' dwForwardTimeout : %d\n' % serverinfo.dwForwardTimeout)
+    outf.write(' dwRpcProtocol : 0x%x\n' % serverinfo.dwRpcProtocol)
+    outf.write(' dwNameCheckFlag : %s\n' % name_check_flag_string(serverinfo.dwNameCheckFlag))
+    outf.write(' cAddressAnswerLimit : %d\n' % serverinfo.cAddressAnswerLimit)
+    outf.write(' dwRecursionRetry : %d\n' % serverinfo.dwRecursionRetry)
+    outf.write(' dwRecursionTimeout : %d\n' %
serverinfo.dwRecursionTimeout) + outf.write(' dwMaxCacheTtl : %d\n' % serverinfo.dwMaxCacheTtl) + outf.write(' dwDsPollingInterval : %d\n' % serverinfo.dwDsPollingInterval) + outf.write(' dwScavengingInterval : %d\n' % serverinfo.dwScavengingInterval) + outf.write(' dwDefaultRefreshInterval : %d\n' % serverinfo.dwDefaultRefreshInterval) + outf.write(' dwDefaultNoRefreshInterval : %d\n' % serverinfo.dwDefaultNoRefreshInterval) + outf.write(' fAutoReverseZones : %s\n' % bool_string(serverinfo.fAutoReverseZones)) + outf.write(' fAutoCacheUpdate : %s\n' % bool_string(serverinfo.fAutoCacheUpdate)) + outf.write(' fRecurseAfterForwarding : %s\n' % bool_string(serverinfo.fRecurseAfterForwarding)) + outf.write(' fForwardDelegations : %s\n' % bool_string(serverinfo.fForwardDelegations)) + outf.write(' fNoRecursion : %s\n' % bool_string(serverinfo.fNoRecursion)) + outf.write(' fSecureResponses : %s\n' % bool_string(serverinfo.fSecureResponses)) + outf.write(' fRoundRobin : %s\n' % bool_string(serverinfo.fRoundRobin)) + outf.write(' fLocalNetPriority : %s\n' % bool_string(serverinfo.fLocalNetPriority)) + outf.write(' fBindSecondaries : %s\n' % bool_string(serverinfo.fBindSecondaries)) + outf.write(' fWriteAuthorityNs : %s\n' % bool_string(serverinfo.fWriteAuthorityNs)) + outf.write(' fStrictFileParsing : %s\n' % bool_string(serverinfo.fStrictFileParsing)) + outf.write(' fLooseWildcarding : %s\n' % bool_string(serverinfo.fLooseWildcarding)) + outf.write(' fDefaultAgingState : %s\n' % bool_string(serverinfo.fDefaultAgingState)) + + if typeid != dnsserver.DNSSRV_TYPEID_SERVER_INFO_W2K: + outf.write(' dwRpcStructureVersion : 0x%x\n' % serverinfo.dwRpcStructureVersion) + outf.write(' aipLogFilter : %s\n' % dns_addr_array_string(serverinfo.aipLogFilter)) + outf.write(' pwszLogFilePath : %s\n' % serverinfo.pwszLogFilePath) + outf.write(' pszDomainName : %s\n' % serverinfo.pszDomainName) + outf.write(' pszForestName : %s\n' % serverinfo.pszForestName) + outf.write(' pszDomainDirectoryPartition : %s\n' % serverinfo.pszDomainDirectoryPartition) + outf.write(' pszForestDirectoryPartition : %s\n' % serverinfo.pszForestDirectoryPartition) + + outf.write(' dwLocalNetPriorityNetMask : 0x%x\n' % serverinfo.dwLocalNetPriorityNetMask) + outf.write(' dwLastScavengeTime : %d\n' % serverinfo.dwLastScavengeTime) + outf.write(' dwEventLogLevel : %d\n' % serverinfo.dwEventLogLevel) + outf.write(' dwLogFileMaxSize : %d\n' % serverinfo.dwLogFileMaxSize) + outf.write(' dwDsForestVersion : %d\n' % serverinfo.dwDsForestVersion) + outf.write(' dwDsDomainVersion : %d\n' % serverinfo.dwDsDomainVersion) + outf.write(' dwDsDsaVersion : %d\n' % serverinfo.dwDsDsaVersion) + + if typeid == dnsserver.DNSSRV_TYPEID_SERVER_INFO: + outf.write(' fReadOnlyDC : %s\n' % bool_string(serverinfo.fReadOnlyDC)) + + +def print_zoneinfo(outf, typeid, zoneinfo): + outf.write(' pszZoneName : %s\n' % zoneinfo.pszZoneName) + outf.write(' dwZoneType : %s\n' % zone_type_string(zoneinfo.dwZoneType)) + outf.write(' fReverse : %s\n' % bool_string(zoneinfo.fReverse)) + outf.write(' fAllowUpdate : %s\n' % zone_update_string(zoneinfo.fAllowUpdate)) + outf.write(' fPaused : %s\n' % bool_string(zoneinfo.fPaused)) + outf.write(' fShutdown : %s\n' % bool_string(zoneinfo.fShutdown)) + outf.write(' fAutoCreated : %s\n' % bool_string(zoneinfo.fAutoCreated)) + outf.write(' fUseDatabase : %s\n' % bool_string(zoneinfo.fUseDatabase)) + outf.write(' pszDataFile : %s\n' % zoneinfo.pszDataFile) + if typeid != dnsserver.DNSSRV_TYPEID_ZONE_INFO: + outf.write(' aipMasters : %s\n' 
% + ip4_array_string(zoneinfo.aipMasters)) + else: + outf.write(' aipMasters : %s\n' % + dns_addr_array_string(zoneinfo.aipMasters)) + outf.write(' fSecureSecondaries : %s\n' % zone_secondary_security_string(zoneinfo.fSecureSecondaries)) + outf.write(' fNotifyLevel : %s\n' % zone_notify_level_string(zoneinfo.fNotifyLevel)) + if typeid != dnsserver.DNSSRV_TYPEID_ZONE_INFO: + outf.write(' aipSecondaries : %s\n' % + ip4_array_string(zoneinfo.aipSecondaries)) + outf.write(' aipNotify : %s\n' % + ip4_array_string(zoneinfo.aipNotify)) + else: + outf.write(' aipSecondaries : %s\n' % + dns_addr_array_string(zoneinfo.aipSecondaries)) + outf.write(' aipNotify : %s\n' % + dns_addr_array_string(zoneinfo.aipNotify)) + outf.write(' fUseWins : %s\n' % bool_string(zoneinfo.fUseWins)) + outf.write(' fUseNbstat : %s\n' % bool_string(zoneinfo.fUseNbstat)) + outf.write(' fAging : %s\n' % bool_string(zoneinfo.fAging)) + outf.write(' dwNoRefreshInterval : %d\n' % zoneinfo.dwNoRefreshInterval) + outf.write(' dwRefreshInterval : %d\n' % zoneinfo.dwRefreshInterval) + outf.write(' dwAvailForScavengeTime : %d\n' % zoneinfo.dwAvailForScavengeTime) + if typeid != dnsserver.DNSSRV_TYPEID_ZONE_INFO: + outf.write(' aipScavengeServers : %s\n' % + ip4_array_string(zoneinfo.aipScavengeServers)) + else: + outf.write(' aipScavengeServers : %s\n' % + dns_addr_array_string(zoneinfo.aipScavengeServers)) + + if typeid != dnsserver.DNSSRV_TYPEID_ZONE_INFO_W2K: + outf.write(' dwRpcStructureVersion : 0x%x\n' % zoneinfo.dwRpcStructureVersion) + outf.write(' dwForwarderTimeout : %d\n' % zoneinfo.dwForwarderTimeout) + outf.write(' fForwarderSlave : %d\n' % zoneinfo.fForwarderSlave) + if typeid != dnsserver.DNSSRV_TYPEID_ZONE_INFO: + outf.write(' aipLocalMasters : %s\n' % + ip4_array_string(zoneinfo.aipLocalMasters)) + else: + outf.write(' aipLocalMasters : %s\n' % + dns_addr_array_string(zoneinfo.aipLocalMasters)) + outf.write(' dwDpFlags : %s\n' % dp_flags_string(zoneinfo.dwDpFlags)) + outf.write(' pszDpFqdn : %s\n' % zoneinfo.pszDpFqdn) + outf.write(' pwszZoneDn : %s\n' % zoneinfo.pwszZoneDn) + outf.write(' dwLastSuccessfulSoaCheck : %d\n' % zoneinfo.dwLastSuccessfulSoaCheck) + outf.write(' dwLastSuccessfulXfr : %d\n' % zoneinfo.dwLastSuccessfulXfr) + + if typeid == dnsserver.DNSSRV_TYPEID_ZONE_INFO: + outf.write(' fQueuedForBackgroundLoad : %s\n' % bool_string(zoneinfo.fQueuedForBackgroundLoad)) + outf.write(' fBackgroundLoadInProgress : %s\n' % bool_string(zoneinfo.fBackgroundLoadInProgress)) + outf.write(' fReadOnlyZone : %s\n' % bool_string(zoneinfo.fReadOnlyZone)) + outf.write(' dwLastXfrAttempt : %d\n' % zoneinfo.dwLastXfrAttempt) + outf.write(' dwLastXfrResult : %d\n' % zoneinfo.dwLastXfrResult) + + +def print_zone(outf, typeid, zone): + outf.write(' pszZoneName : %s\n' % zone.pszZoneName) + outf.write(' Flags : %s\n' % zone_flags_string(zone.Flags)) + outf.write(' ZoneType : %s\n' % zone_type_string(zone.ZoneType)) + outf.write(' Version : %s\n' % zone.Version) + + if typeid != dnsserver.DNSSRV_TYPEID_ZONE_W2K: + outf.write(' dwDpFlags : %s\n' % dp_flags_string(zone.dwDpFlags)) + outf.write(' pszDpFqdn : %s\n' % zone.pszDpFqdn) + + +def print_enumzones(outf, typeid, zones): + outf.write(' %d zone(s) found\n' % zones.dwZoneCount) + for zone in zones.ZoneArray: + outf.write('\n') + print_zone(outf, typeid, zone) + + +def print_dns_record(outf, rec): + if rec.wType == dnsp.DNS_TYPE_A: + mesg = 'A: %s' % (rec.data) + elif rec.wType == dnsp.DNS_TYPE_AAAA: + mesg = 'AAAA: %s' % (rec.data) + elif rec.wType == dnsp.DNS_TYPE_PTR: + 
mesg = 'PTR: %s' % (rec.data.str) + elif rec.wType == dnsp.DNS_TYPE_NS: + mesg = 'NS: %s' % (rec.data.str) + elif rec.wType == dnsp.DNS_TYPE_CNAME: + mesg = 'CNAME: %s' % (rec.data.str) + elif rec.wType == dnsp.DNS_TYPE_SOA: + mesg = 'SOA: serial=%d, refresh=%d, retry=%d, expire=%d, minttl=%d, ns=%s, email=%s' % ( + rec.data.dwSerialNo, + rec.data.dwRefresh, + rec.data.dwRetry, + rec.data.dwExpire, + rec.data.dwMinimumTtl, + rec.data.NamePrimaryServer.str, + rec.data.ZoneAdministratorEmail.str) + elif rec.wType == dnsp.DNS_TYPE_MX: + mesg = 'MX: %s (%d)' % (rec.data.nameExchange.str, rec.data.wPreference) + elif rec.wType == dnsp.DNS_TYPE_SRV: + mesg = 'SRV: %s (%d, %d, %d)' % (rec.data.nameTarget.str, rec.data.wPort, + rec.data.wPriority, rec.data.wWeight) + elif rec.wType == dnsp.DNS_TYPE_TXT: + slist = ['"%s"' % name.str for name in rec.data.str] + mesg = 'TXT: %s' % ','.join(slist) + else: + mesg = 'Unknown: ' + outf.write(' %s (flags=%x, serial=%d, ttl=%d)\n' % ( + mesg, rec.dwFlags, rec.dwSerial, rec.dwTtlSeconds)) + + +def print_dnsrecords(outf, records): + for rec in records.rec: + outf.write(' Name=%s, Records=%d, Children=%d\n' % ( + rec.dnsNodeName.str, + rec.wRecordCount, + rec.dwChildCount)) + for dns_rec in rec.records: + print_dns_record(outf, dns_rec) + + +# Convert data into a dns record +def data_to_dns_record(record_type, data): + try: + rec = record_from_string(record_type, data) + except DNSParseError as e: + raise CommandError(*e.args) from None + + return rec + + +class cmd_serverinfo(Command): + """Query for Server information.""" + + synopsis = '%prog [options]' + + takes_args = ['server'] + + takes_optiongroups = { + "sambaopts": options.SambaOptions, + "versionopts": options.VersionOptions, + "credopts": options.CredentialsOptions, + } + + takes_options = [ + Option('--client-version', help='Client Version', + default='longhorn', metavar='w2k|dotnet|longhorn', + choices=['w2k', 'dotnet', 'longhorn'], dest='cli_ver'), + ] + + def run(self, server, cli_ver, sambaopts=None, credopts=None, + versionopts=None): + self.lp = sambaopts.get_loadparm() + self.creds = credopts.get_credentials(self.lp) + dns_conn = DnsConnWrapper(server, self.lp, self.creds) + + client_version = dns_client_version(cli_ver) + + typeid, res = dns_conn.DnssrvQuery2(client_version, 0, server, + None, 'ServerInfo') + print_serverinfo(self.outf, typeid, res) + + +def _add_integer_options(table, takes_options, integer_properties): + """Generate options for cmd_zoneoptions""" + for k, doc, _min, _max in table: + o = '--' + k.lower() + opt = Option(o, + help=f"{doc} [{_min}-{_max}]", + type="int", + dest=k) + takes_options.append(opt) + integer_properties.append((k, _min, _max, o)) + + +class cmd_zoneoptions(Command): + """Change zone aging options.""" + + synopsis = '%prog [options]' + + takes_args = ['server', 'zone'] + + takes_optiongroups = { + "sambaopts": options.SambaOptions, + "versionopts": options.VersionOptions, + "credopts": options.CredentialsOptions, + } + + takes_options = [ + Option('--client-version', help='Client Version', + default='longhorn', metavar='w2k|dotnet|longhorn', + choices=['w2k', 'dotnet', 'longhorn'], dest='cli_ver'), + Option('--mark-old-records-static', metavar="YYYY-MM-DD", + help="Make records older than this (YYYY-MM-DD) static"), + Option('--mark-records-static-regex', metavar="REGEXP", + help="Make records matching this regular expression static"), + Option('--mark-records-dynamic-regex', metavar="REGEXP", + help="Make records matching this regular expression 
dynamic"), + Option('-n', '--dry-run', action='store_true', + help="Don't change anything, say what would happen"), + ] + + integer_properties = [] + # Any zone parameter that is stored as an integer (which is most of + # them) can be added to this table. The name should be the dnsp + # mixed case name, which will get munged into a lowercase name for + # the option. (e.g. "Aging" becomes "--aging"). + # + # Note: just because we add a name here doesn't mean we will use + # it. + _add_integer_options([ + # ( name, help-string, min, max ) + ('Aging', 'Enable record aging', 0, 1), + ('NoRefreshInterval', + 'Aging no refresh interval in hours (0: use default)', + 0, 10 * 365 * 24), + ('RefreshInterval', + 'Aging refresh interval in hours (0: use default)', + 0, 10 * 365 * 24), + ], + takes_options, + integer_properties) + + def run(self, server, zone, cli_ver, sambaopts=None, credopts=None, + versionopts=None, dry_run=False, + mark_old_records_static=None, + mark_records_static_regex=None, + mark_records_dynamic_regex=None, + **kwargs): + self.lp = sambaopts.get_loadparm() + self.creds = credopts.get_credentials(self.lp) + dns_conn = DnsConnWrapper(server, self.lp, self.creds) + + client_version = dns_client_version(cli_ver) + nap_type = dnsserver.DNSSRV_TYPEID_NAME_AND_PARAM + + for k, _min, _max, o in self.integer_properties: + if kwargs.get(k) is None: + continue + v = kwargs[k] + if _min is not None and v < _min: + raise CommandError(f"{o} must be at least {_min}") + if _max is not None and v > _max: + raise CommandError(f"{o} can't exceed {_max}") + + name_param = dnsserver.DNS_RPC_NAME_AND_PARAM() + name_param.dwParam = v + name_param.pszNodeName = k + if dry_run: + print(f"would set {k} to {v} for {zone}", file=self.outf) + continue + try: + dns_conn.DnssrvOperation2(client_version, + 0, + server, + zone, + 0, + 'ResetDwordProperty', + nap_type, + name_param) + except WERRORError as e: + raise CommandError(f"Could not set {k} to {v}") from None + + print(f"Set {k} to {v}", file=self.outf) + + # We don't want to allow more than one of these --mark-* + # options at a time, as they are sensitive to ordering and + # the order is not documented. 
+        n_mark_options = 0
+        for x in (mark_old_records_static,
+                  mark_records_static_regex,
+                  mark_records_dynamic_regex):
+            if x is not None:
+                n_mark_options += 1
+
+        if n_mark_options > 1:
+            raise CommandError("Multiple --mark-* options will not work\n")
+
+        if mark_old_records_static is not None:
+            self.mark_old_records_static(server, zone,
+                                         mark_old_records_static,
+                                         dry_run)
+
+        if mark_records_static_regex is not None:
+            self.mark_records_static_regex(server,
+                                           zone,
+                                           mark_records_static_regex,
+                                           dry_run)
+
+        if mark_records_dynamic_regex is not None:
+            self.mark_records_dynamic_regex(server,
+                                            zone,
+                                            mark_records_dynamic_regex,
+                                            dry_run)
+
+    def _get_dns_nodes(self, server, zone_name):
+        samdb = SamDB(url="ldap://%s" % server,
+                      session_info=system_session(),
+                      credentials=self.creds, lp=self.lp)
+
+        zone_dn = (f"DC={zone_name},CN=MicrosoftDNS,DC=DomainDNSZones,"
+                   f"{samdb.get_default_basedn()}")
+
+        nodes = samdb.search(base=zone_dn,
+                             scope=ldb.SCOPE_SUBTREE,
+                             expression=("(&(objectClass=dnsNode)"
+                                         "(!(dNSTombstoned=TRUE)))"),
+                             attrs=["dnsRecord", "name"])
+        return samdb, nodes
+
+    def mark_old_records_static(self, server, zone_name, date_string, dry_run):
+        try:
+            ts = time.strptime(date_string, "%Y-%m-%d")
+            t = time.mktime(ts)
+        except ValueError:
+            raise CommandError(f"Invalid date {date_string}: should be YYYY-MM-DD")
+        threshold = dsdb_dns.unix_to_dns_timestamp(int(t))
+
+        samdb, nodes = self._get_dns_nodes(server, zone_name)
+
+        for node in nodes:
+            if "dnsRecord" not in node:
+                continue
+
+            values = list(node["dnsRecord"])
+            changes = 0
+            for i, v in enumerate(values):
+                rec = ndr_unpack(dnsp.DnssrvRpcRecord, v)
+                if rec.dwTimeStamp < threshold and rec.dwTimeStamp != 0:
+                    rec.dwTimeStamp = 0
+                    values[i] = ndr_pack(rec)
+                    changes += 1
+
+            if changes == 0:
+                continue
+
+            name = node["name"][0].decode()
+
+            if dry_run:
+                print(f"would make {changes}/{len(values)} records static "
+                      f"on {name}.{zone_name}.", file=self.outf)
+                continue
+
+            msg = ldb.Message.from_dict(samdb,
+                                        {'dn': node.dn,
+                                         'dnsRecord': values
+                                         },
+                                        ldb.FLAG_MOD_REPLACE)
+            samdb.modify(msg)
+            print(f"made {changes}/{len(values)} records static on "
+                  f"{name}.{zone_name}.", file=self.outf)
+
+    def mark_records_static_regex(self, server, zone_name, regex, dry_run):
+        """Make the records of nodes with matching names static.
+        """
+        r = re.compile(regex)
+        samdb, nodes = self._get_dns_nodes(server, zone_name)
+
+        for node in nodes:
+            name = node["name"][0].decode()
+            if not r.search(name):
+                continue
+            if "dnsRecord" not in node:
+                continue
+
+            values = list(node["dnsRecord"])
+            if len(values) == 0:
+                continue
+
+            changes = 0
+            for i, v in enumerate(values):
+                rec = ndr_unpack(dnsp.DnssrvRpcRecord, v)
+                if rec.dwTimeStamp != 0:
+                    rec.dwTimeStamp = 0
+                    values[i] = ndr_pack(rec)
+                    changes += 1
+
+            if changes == 0:
+                continue
+
+            if dry_run:
+                print(f"would make {changes}/{len(values)} records static "
+                      f"on {name}.{zone_name}.", file=self.outf)
+                continue
+
+            msg = ldb.Message.from_dict(samdb,
+                                        {'dn': node.dn,
+                                         'dnsRecord': values
+                                         },
+                                        ldb.FLAG_MOD_REPLACE)
+            samdb.modify(msg)
+            print(f"made {changes}/{len(values)} records static on "
+                  f"{name}.{zone_name}.", file=self.outf)
+
+    def mark_records_dynamic_regex(self, server, zone_name, regex, dry_run):
+        """Make the records of nodes with matching names dynamic, with a
+        current timestamp. In this case we only adjust the A, AAAA,
+        and TXT records.
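+
+        Other record types (SOA, NS, SRV, ...) are left untouched; a
+        non-zero timestamp exposes a record to aging and scavenging,
+        which is presumably unwanted for structural records.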
+ """ + r = re.compile(regex) + samdb, nodes = self._get_dns_nodes(server, zone_name) + now = time.time() + dns_timestamp = dsdb_dns.unix_to_dns_timestamp(int(now)) + safe_wtypes = { + dnsp.DNS_TYPE_A, + dnsp.DNS_TYPE_AAAA, + dnsp.DNS_TYPE_TXT + } + + for node in nodes: + name = node["name"][0].decode() + if not r.search(name): + continue + if "dnsRecord" not in node: + continue + + values = list(node["dnsRecord"]) + if len(values) == 0: + continue + + changes = 0 + for i, v in enumerate(values): + rec = ndr_unpack(dnsp.DnssrvRpcRecord, v) + if rec.wType in safe_wtypes and rec.dwTimeStamp == 0: + rec.dwTimeStamp = dns_timestamp + values[i] = ndr_pack(rec) + changes += 1 + + if changes == 0: + continue + + if dry_run: + print(f"would make {changes}/{len(values)} records dynamic " + f"on {name}.{zone_name}.", file=self.outf) + continue + + msg = ldb.Message.from_dict(samdb, + {'dn': node.dn, + 'dnsRecord': values + }, + ldb.FLAG_MOD_REPLACE) + samdb.modify(msg) + print(f"made {changes}/{len(values)} records dynamic on " + f"{name}.{zone_name}.", file=self.outf) + + +class cmd_zoneinfo(Command): + """Query for zone information.""" + + synopsis = '%prog [options]' + + takes_args = ['server', 'zone'] + + takes_optiongroups = { + "sambaopts": options.SambaOptions, + "versionopts": options.VersionOptions, + "credopts": options.CredentialsOptions, + } + + takes_options = [ + Option('--client-version', help='Client Version', + default='longhorn', metavar='w2k|dotnet|longhorn', + choices=['w2k', 'dotnet', 'longhorn'], dest='cli_ver'), + ] + + def run(self, server, zone, cli_ver, sambaopts=None, credopts=None, + versionopts=None): + self.lp = sambaopts.get_loadparm() + self.creds = credopts.get_credentials(self.lp) + dns_conn = DnsConnWrapper(server, self.lp, self.creds) + + client_version = dns_client_version(cli_ver) + + typeid, res = dns_conn.DnssrvQuery2(client_version, 0, server, zone, + 'ZoneInfo') + print_zoneinfo(self.outf, typeid, res) + + +class cmd_zonelist(Command): + """Query for zones.""" + + synopsis = '%prog [options]' + + takes_args = ['server'] + + takes_optiongroups = { + "sambaopts": options.SambaOptions, + "versionopts": options.VersionOptions, + "credopts": options.CredentialsOptions, + } + + takes_options = [ + Option('--client-version', help='Client Version', + default='longhorn', metavar='w2k|dotnet|longhorn', + choices=['w2k', 'dotnet', 'longhorn'], dest='cli_ver'), + Option('--primary', help='List primary zones (default)', + action='store_true', dest='primary'), + Option('--secondary', help='List secondary zones', + action='store_true', dest='secondary'), + Option('--cache', help='List cached zones', + action='store_true', dest='cache'), + Option('--auto', help='List automatically created zones', + action='store_true', dest='auto'), + Option('--forward', help='List forward zones', + action='store_true', dest='forward'), + Option('--reverse', help='List reverse zones', + action='store_true', dest='reverse'), + Option('--ds', help='List directory integrated zones', + action='store_true', dest='ds'), + Option('--non-ds', help='List non-directory zones', + action='store_true', dest='nonds') + ] + + def run(self, server, cli_ver, primary=False, secondary=False, cache=False, + auto=False, forward=False, reverse=False, ds=False, nonds=False, + sambaopts=None, credopts=None, versionopts=None): + request_filter = 0 + + if primary: + request_filter |= dnsserver.DNS_ZONE_REQUEST_PRIMARY + if secondary: + request_filter |= dnsserver.DNS_ZONE_REQUEST_SECONDARY + if cache: + 
request_filter |= dnsserver.DNS_ZONE_REQUEST_CACHE + if auto: + request_filter |= dnsserver.DNS_ZONE_REQUEST_AUTO + if forward: + request_filter |= dnsserver.DNS_ZONE_REQUEST_FORWARD + if reverse: + request_filter |= dnsserver.DNS_ZONE_REQUEST_REVERSE + if ds: + request_filter |= dnsserver.DNS_ZONE_REQUEST_DS + if nonds: + request_filter |= dnsserver.DNS_ZONE_REQUEST_NON_DS + + if request_filter == 0: + request_filter = dnsserver.DNS_ZONE_REQUEST_PRIMARY + + self.lp = sambaopts.get_loadparm() + self.creds = credopts.get_credentials(self.lp) + dns_conn = DnsConnWrapper(server, self.lp, self.creds) + + client_version = dns_client_version(cli_ver) + + typeid, res = dns_conn.DnssrvComplexOperation2(client_version, + 0, server, None, + 'EnumZones', + dnsserver.DNSSRV_TYPEID_DWORD, + request_filter) + + if client_version == dnsserver.DNS_CLIENT_VERSION_W2K: + typeid = dnsserver.DNSSRV_TYPEID_ZONE_W2K + else: + typeid = dnsserver.DNSSRV_TYPEID_ZONE + print_enumzones(self.outf, typeid, res) + + +class cmd_zonecreate(Command): + """Create a zone.""" + + synopsis = '%prog [options]' + + takes_args = ['server', 'zone'] + + takes_optiongroups = { + "sambaopts": options.SambaOptions, + "versionopts": options.VersionOptions, + "credopts": options.CredentialsOptions, + } + + takes_options = [ + Option('--client-version', help='Client Version', + default='longhorn', metavar='w2k|dotnet|longhorn', + choices=['w2k', 'dotnet', 'longhorn'], dest='cli_ver'), + Option('--dns-directory-partition', + help='Specify the naming context for the new zone, which ' + 'affects the replication scope (domain or forest wide ' + 'replication, default: domain).', + default='domain', + metavar='domain|forest', + choices=['domain', 'forest'], + dest='dns_dp'), + ] + + def run(self, + server, + zone, + cli_ver, + dns_dp, + sambaopts=None, + credopts=None, + versionopts=None): + self.lp = sambaopts.get_loadparm() + self.creds = credopts.get_credentials(self.lp) + dns_conn = DnsConnWrapper(server, self.lp, self.creds) + + zone = zone.lower() + + dns_directorypartition = dnsserver.DNS_DP_DOMAIN_DEFAULT + if dns_dp == 'forest': + dns_directorypartition = dnsserver.DNS_DP_FOREST_DEFAULT + + client_version = dns_client_version(cli_ver) + if client_version == dnsserver.DNS_CLIENT_VERSION_W2K: + typeid = dnsserver.DNSSRV_TYPEID_ZONE_CREATE_W2K + zone_create_info = dnsserver.DNS_RPC_ZONE_CREATE_INFO_W2K() + zone_create_info.pszZoneName = zone + zone_create_info.dwZoneType = dnsp.DNS_ZONE_TYPE_PRIMARY + zone_create_info.fAging = 0 + zone_create_info.fDsIntegrated = 1 + zone_create_info.fLoadExisting = 1 + elif client_version == dnsserver.DNS_CLIENT_VERSION_DOTNET: + typeid = dnsserver.DNSSRV_TYPEID_ZONE_CREATE_DOTNET + zone_create_info = dnsserver.DNS_RPC_ZONE_CREATE_INFO_DOTNET() + zone_create_info.pszZoneName = zone + zone_create_info.dwZoneType = dnsp.DNS_ZONE_TYPE_PRIMARY + zone_create_info.fAging = 0 + zone_create_info.fDsIntegrated = 1 + zone_create_info.fLoadExisting = 1 + zone_create_info.dwDpFlags = dns_directorypartition + else: + typeid = dnsserver.DNSSRV_TYPEID_ZONE_CREATE + zone_create_info = dnsserver.DNS_RPC_ZONE_CREATE_INFO_LONGHORN() + zone_create_info.pszZoneName = zone + zone_create_info.dwZoneType = dnsp.DNS_ZONE_TYPE_PRIMARY + zone_create_info.fAging = 0 + zone_create_info.fDsIntegrated = 1 + zone_create_info.fLoadExisting = 1 + zone_create_info.dwDpFlags = dns_directorypartition + + dns_conn.DnssrvOperation2(client_version, 0, server, None, + 0, 'ZoneCreate', typeid, + zone_create_info) + + typeid = 
dnsserver.DNSSRV_TYPEID_NAME_AND_PARAM + name_and_param = dnsserver.DNS_RPC_NAME_AND_PARAM() + name_and_param.pszNodeName = 'AllowUpdate' + name_and_param.dwParam = dnsp.DNS_ZONE_UPDATE_SECURE + + messages = { + werror.WERR_DNS_ERROR_ZONE_ALREADY_EXISTS: ( + f'Zone "{zone}" already exists.') + } + + dns_conn.DnssrvOperation2(client_version, 0, server, zone, + 0, 'ResetDwordProperty', typeid, + name_and_param, messages=messages) + + self.outf.write('Zone %s created successfully\n' % zone) + + +class cmd_zonedelete(Command): + """Delete a zone.""" + + synopsis = '%prog [options]' + + takes_args = ['server', 'zone'] + + takes_optiongroups = { + "sambaopts": options.SambaOptions, + "versionopts": options.VersionOptions, + "credopts": options.CredentialsOptions, + } + + def run(self, server, zone, sambaopts=None, credopts=None, + versionopts=None): + + self.lp = sambaopts.get_loadparm() + self.creds = credopts.get_credentials(self.lp) + dns_conn = DnsConnWrapper(server, self.lp, self.creds) + + zone = zone.lower() + + messages = { + werror.WERR_DNS_ERROR_ZONE_DOES_NOT_EXIST: ( + f'Zone {zone} does not exist and so could not be deleted.'), + } + res = dns_conn.DnssrvOperation2(dnsserver.DNS_CLIENT_VERSION_LONGHORN, + 0, server, zone, 0, 'DeleteZoneFromDs', + dnsserver.DNSSRV_TYPEID_NULL, + None, messages=messages) + + self.outf.write('Zone %s deleted successfully\n' % zone) + + +class cmd_query(Command): + """Query a name.""" + + synopsis = ('%prog ' + ' [options]') + + takes_args = ['server', 'zone', 'name', 'rtype'] + + takes_optiongroups = { + "sambaopts": options.SambaOptions, + "versionopts": options.VersionOptions, + "credopts": options.CredentialsOptions, + } + + takes_options = [ + Option('--authority', help='Search authoritative records (default)', + action='store_true', dest='authority'), + Option('--cache', help='Search cached records', + action='store_true', dest='cache'), + Option('--glue', help='Search glue records', + action='store_true', dest='glue'), + Option('--root', help='Search root hints', + action='store_true', dest='root'), + Option('--additional', help='List additional records', + action='store_true', dest='additional'), + Option('--no-children', help='Do not list children', + action='store_true', dest='no_children'), + Option('--only-children', help='List only children', + action='store_true', dest='only_children') + ] + + def run(self, server, zone, name, rtype, authority=False, cache=False, + glue=False, root=False, additional=False, no_children=False, + only_children=False, sambaopts=None, credopts=None, + versionopts=None): + record_type = dns_type_flag(rtype) + + if name.find('*') != -1: + self.outf.write('use "@" to dump entire domain, looking up %s\n' % + name) + + select_flags = 0 + if authority: + select_flags |= dnsserver.DNS_RPC_VIEW_AUTHORITY_DATA + if cache: + select_flags |= dnsserver.DNS_RPC_VIEW_CACHE_DATA + if glue: + select_flags |= dnsserver.DNS_RPC_VIEW_GLUE_DATA + if root: + select_flags |= dnsserver.DNS_RPC_VIEW_ROOT_HINT_DATA + if additional: + select_flags |= dnsserver.DNS_RPC_VIEW_ADDITIONAL_DATA + if no_children: + select_flags |= dnsserver.DNS_RPC_VIEW_NO_CHILDREN + if only_children: + select_flags |= dnsserver.DNS_RPC_VIEW_ONLY_CHILDREN + + if select_flags == 0: + select_flags = dnsserver.DNS_RPC_VIEW_AUTHORITY_DATA + + if select_flags == dnsserver.DNS_RPC_VIEW_ADDITIONAL_DATA: + self.outf.write('Specify either --authority or --root along with --additional.\n') + self.outf.write('Assuming --authority.\n') + select_flags |= 
dnsserver.DNS_RPC_VIEW_AUTHORITY_DATA
+
+        self.lp = sambaopts.get_loadparm()
+        self.creds = credopts.get_credentials(self.lp)
+        dns_conn = DnsConnWrapper(server, self.lp, self.creds)
+
+        messages = {
+            werror.WERR_DNS_ERROR_NAME_DOES_NOT_EXIST: (
+                'Record or zone does not exist.')
+        }
+        buflen, res = dns_conn.DnssrvEnumRecords2(
+            dnsserver.DNS_CLIENT_VERSION_LONGHORN, 0, server, zone, name,
+            None, record_type, select_flags, None, None,
+            messages=messages)
+
+        print_dnsrecords(self.outf, res)
+
+
+class cmd_roothints(Command):
+    """Query root hints."""
+
+    synopsis = '%prog <server> [<name>] [options]'
+
+    takes_args = ['server', 'name?']
+
+    takes_optiongroups = {
+        "sambaopts": options.SambaOptions,
+        "versionopts": options.VersionOptions,
+        "credopts": options.CredentialsOptions,
+    }
+
+    def run(self, server, name='.', sambaopts=None, credopts=None,
+            versionopts=None):
+        record_type = dnsp.DNS_TYPE_NS
+        select_flags = (dnsserver.DNS_RPC_VIEW_ROOT_HINT_DATA |
+                        dnsserver.DNS_RPC_VIEW_ADDITIONAL_DATA)
+
+        self.lp = sambaopts.get_loadparm()
+        self.creds = credopts.get_credentials(self.lp)
+        dns_conn = DnsConnWrapper(server, self.lp, self.creds)
+
+        buflen, res = dns_conn.DnssrvEnumRecords2(
+            dnsserver.DNS_CLIENT_VERSION_LONGHORN, 0, server, '..RootHints',
+            name, None, record_type, select_flags, None, None)
+        print_dnsrecords(self.outf, res)
+
+
+class cmd_add_record(Command):
+    """Add a DNS record
+
+    For each type data contents are as follows:
+      A      ipv4_address_string
+      AAAA   ipv6_address_string
+      PTR    fqdn_string
+      CNAME  fqdn_string
+      NS     fqdn_string
+      MX     "fqdn_string preference"
+      SRV    "fqdn_string port priority weight"
+      TXT    "'string1' 'string2' ..."
+    """
+
+    synopsis = '%prog <server> <zone> <name> <A|AAAA|PTR|CNAME|NS|MX|SRV|TXT> <data>'
+
+    takes_args = ['server', 'zone', 'name', 'rtype', 'data']
+
+    takes_optiongroups = {
+        "sambaopts": options.SambaOptions,
+        "versionopts": options.VersionOptions,
+        "credopts": options.CredentialsOptions,
+    }
+
+    def run(self, server, zone, name, rtype, data, sambaopts=None,
+            credopts=None, versionopts=None):
+
+        if rtype.upper() not in ('A', 'AAAA', 'PTR', 'CNAME', 'NS', 'MX', 'SRV', 'TXT'):
+            raise CommandError('Adding record of type %s is not supported' % rtype)
+
+        record_type = dns_type_flag(rtype)
+        rec = data_to_dns_record(record_type, data)
+
+        self.lp = sambaopts.get_loadparm()
+        self.creds = credopts.get_credentials(self.lp)
+        dns_conn = DnsConnWrapper(server, self.lp, self.creds)
+
+        add_rec_buf = dnsserver.DNS_RPC_RECORD_BUF()
+        add_rec_buf.rec = rec
+
+        messages = {
+            werror.WERR_DNS_ERROR_NAME_DOES_NOT_EXIST: (
+                'Zone does not exist; record could not be added. '
+                f'zone[{zone}] name[{name}]'),
+            werror.WERR_DNS_ERROR_RECORD_ALREADY_EXISTS: (
+                'Record already exists; record could not be added. '
+                f'zone[{zone}] name[{name}]')
+        }
+        dns_conn.DnssrvUpdateRecord2(dnsserver.DNS_CLIENT_VERSION_LONGHORN,
+                                     0, server, zone, name, add_rec_buf, None,
+                                     messages=messages)
+
+        self.outf.write('Record added successfully\n')
+
+
+class cmd_update_record(Command):
+    """Update a DNS record
+
+    For each type data contents are as follows:
+      A      ipv4_address_string
+      AAAA   ipv6_address_string
+      PTR    fqdn_string
+      CNAME  fqdn_string
+      NS     fqdn_string
+      MX     "fqdn_string preference"
+      SOA    "fqdn_dns fqdn_email serial refresh retry expire minimumttl"
+      SRV    "fqdn_string port priority weight"
+      TXT    "'string1' 'string2' ..."
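+
+    Example (server, zone and addresses are hypothetical):
+        samba-tool dns update dc1 example.com host1 A 10.0.0.1 10.0.0.2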
+ """ + + synopsis = '%prog ' + + takes_args = ['server', 'zone', 'name', 'rtype', 'olddata', 'newdata'] + + takes_optiongroups = { + "sambaopts": options.SambaOptions, + "versionopts": options.VersionOptions, + "credopts": options.CredentialsOptions, + } + + def run(self, server, zone, name, rtype, olddata, newdata, + sambaopts=None, credopts=None, versionopts=None): + + rtype = rtype.upper() + if rtype not in ('A', 'AAAA', 'PTR', 'CNAME', 'NS', 'MX', 'SOA', 'SRV', 'TXT'): + raise CommandError('Updating record of type %s is not supported' % rtype) + + try: + if rtype == 'A': + inet_pton(AF_INET, newdata) + elif rtype == 'AAAA': + inet_pton(AF_INET6, newdata) + except OSError as e: + raise CommandError(f"bad data for {rtype}: {e!r}") + + record_type = dns_type_flag(rtype) + rec = data_to_dns_record(record_type, newdata) + + self.lp = sambaopts.get_loadparm() + self.creds = credopts.get_credentials(self.lp) + dns_conn = DnsConnWrapper(server, self.lp, self.creds) + + try: + rec_match = dns_record_match(dns_conn.dns_conn, server, zone, + name, record_type, olddata) + except DNSParseError as e: + raise CommandError(*e.args) from None + + if not rec_match: + raise CommandError('Record or zone does not exist.') + + # Copy properties from existing record to new record + rec.dwFlags = rec_match.dwFlags + rec.dwSerial = rec_match.dwSerial + rec.dwTtlSeconds = rec_match.dwTtlSeconds + rec.dwTimeStamp = rec_match.dwTimeStamp + + add_rec_buf = dnsserver.DNS_RPC_RECORD_BUF() + add_rec_buf.rec = rec + + del_rec_buf = dnsserver.DNS_RPC_RECORD_BUF() + del_rec_buf.rec = rec_match + + messages = { + werror.WERR_DNS_ERROR_NAME_DOES_NOT_EXIST: ( + f'Zone {zone} does not exist; record could not be updated.'), + } + + dns_conn.DnssrvUpdateRecord2(dnsserver.DNS_CLIENT_VERSION_LONGHORN, + 0, + server, + zone, + name, + add_rec_buf, + del_rec_buf, + messages=messages) + + self.outf.write('Record updated successfully\n') + + +class cmd_delete_record(Command): + """Delete a DNS record + + For each type data contents are as follows: + A ipv4_address_string + AAAA ipv6_address_string + PTR fqdn_string + CNAME fqdn_string + NS fqdn_string + MX "fqdn_string preference" + SRV "fqdn_string port priority weight" + TXT "'string1' 'string2' ..." + """ + + synopsis = '%prog ' + + takes_args = ['server', 'zone', 'name', 'rtype', 'data'] + + takes_optiongroups = { + "sambaopts": options.SambaOptions, + "versionopts": options.VersionOptions, + "credopts": options.CredentialsOptions, + } + + def run(self, server, zone, name, rtype, data, sambaopts=None, credopts=None, versionopts=None): + + if rtype.upper() not in ('A', 'AAAA', 'PTR', 'CNAME', 'NS', 'MX', 'SRV', 'TXT'): + raise CommandError('Deleting record of type %s is not supported' % rtype) + + record_type = dns_type_flag(rtype) + rec = data_to_dns_record(record_type, data) + + self.lp = sambaopts.get_loadparm() + self.creds = credopts.get_credentials(self.lp) + dns_conn = DnsConnWrapper(server, self.lp, self.creds) + + del_rec_buf = dnsserver.DNS_RPC_RECORD_BUF() + del_rec_buf.rec = rec + + messages = { + werror.WERR_DNS_ERROR_NAME_DOES_NOT_EXIST: ( + 'Zone does not exist; record could not be deleted. ' + f'zone[{zone}] name[{name}'), + werror.WERR_DNS_ERROR_RECORD_ALREADY_EXISTS: ( + 'Record already exists; record could not be deleted. 
' + f'zone[{zone}] name[{name}]') + } + dns_conn.DnssrvUpdateRecord2(dnsserver.DNS_CLIENT_VERSION_LONGHORN, + 0, + server, + zone, + name, + None, + del_rec_buf, + messages=messages) + + self.outf.write('Record deleted successfully\n') + + +class cmd_cleanup_record(Command): + """Cleanup DNS records for a DNS host. + + example: + + samba-tool dns cleanup dc1 dc1.samdom.test.site -U USER%PASSWORD + + NOTE: This command in many cases will only mark the `dNSTombstoned` attr + as `TRUE` on the DNS records. Querying will no longer return results but + there may still be some placeholder entries in the database. + """ + + synopsis = '%prog ' + + takes_args = ['server', 'dnshostname'] + + takes_optiongroups = { + "sambaopts": options.SambaOptions, + "versionopts": options.VersionOptions, + "credopts": options.CredentialsOptions, + } + + takes_options = [ + Option("-v", "--verbose", help="Be verbose", action="store_true"), + Option("-q", "--quiet", help="Be quiet", action="store_true"), + ] + + def run(self, server, dnshostname, sambaopts=None, credopts=None, + versionopts=None, verbose=False, quiet=False): + lp = sambaopts.get_loadparm() + creds = credopts.get_credentials(lp) + + logger = self.get_logger(verbose=verbose, quiet=quiet) + + samdb = SamDB(url="ldap://%s" % server, + session_info=system_session(), + credentials=creds, lp=lp) + + remove_dc.remove_dns_references(samdb, logger, dnshostname, + ignore_no_name=True) + + +class cmd_dns(SuperCommand): + """Domain Name Service (DNS) management.""" + + subcommands = {} + subcommands['serverinfo'] = cmd_serverinfo() + subcommands['zoneoptions'] = cmd_zoneoptions() + subcommands['zoneinfo'] = cmd_zoneinfo() + subcommands['zonelist'] = cmd_zonelist() + subcommands['zonecreate'] = cmd_zonecreate() + subcommands['zonedelete'] = cmd_zonedelete() + subcommands['query'] = cmd_query() + subcommands['roothints'] = cmd_roothints() + subcommands['add'] = cmd_add_record() + subcommands['update'] = cmd_update_record() + subcommands['delete'] = cmd_delete_record() + subcommands['cleanup'] = cmd_cleanup_record() diff --git a/python/samba/netcmd/domain/__init__.py b/python/samba/netcmd/domain/__init__.py new file mode 100644 index 0000000..1c527f1 --- /dev/null +++ b/python/samba/netcmd/domain/__init__.py @@ -0,0 +1,73 @@ +# domain management +# +# Copyright Matthias Dieter Wallnoefer 2009 +# Copyright Andrew Kroeger 2009 +# Copyright Jelmer Vernooij 2007-2012 +# Copyright Giampaolo Lauria 2011 +# Copyright Matthieu Patou 2011 +# Copyright Andrew Bartlett 2008-2015 +# Copyright Stefan Metzmacher 2012 +# +# This program is free software; you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation; either version 3 of the License, or +# (at your option) any later version. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with this program. If not, see . 
+# + +from samba import is_ad_dc_built +from samba.netcmd import SuperCommand + +from .auth import cmd_domain_auth +from .backup import cmd_domain_backup +from .claim import cmd_domain_claim +from .classicupgrade import cmd_domain_classicupgrade +from .common import (common_join_options, common_ntvfs_options, + common_provision_join_options) +from .dcpromo import cmd_domain_dcpromo +from .demote import cmd_domain_demote +from .functional_prep import cmd_domain_functional_prep +from .info import cmd_domain_info +from .join import cmd_domain_join +from .keytab import cmd_domain_export_keytab +from .leave import cmd_domain_leave +from .level import cmd_domain_level +from .passwordsettings import cmd_domain_passwordsettings +from .provision import cmd_domain_provision +from .samba3upgrade import cmd_domain_samba3upgrade +from .schemaupgrade import cmd_domain_schema_upgrade +from .tombstones import cmd_domain_tombstones +from .trust import cmd_domain_trust + + +class cmd_domain(SuperCommand): + """Domain management.""" + + subcommands = {} + if cmd_domain_export_keytab is not None: + subcommands["exportkeytab"] = cmd_domain_export_keytab() + subcommands["info"] = cmd_domain_info() + subcommands["join"] = cmd_domain_join() + subcommands["leave"] = cmd_domain_leave() + subcommands["claim"] = cmd_domain_claim() + subcommands["auth"] = cmd_domain_auth() + if is_ad_dc_built(): + subcommands["demote"] = cmd_domain_demote() + subcommands["provision"] = cmd_domain_provision() + subcommands["dcpromo"] = cmd_domain_dcpromo() + subcommands["level"] = cmd_domain_level() + subcommands["passwordsettings"] = cmd_domain_passwordsettings() + subcommands["classicupgrade"] = cmd_domain_classicupgrade() + subcommands["samba3upgrade"] = cmd_domain_samba3upgrade() + subcommands["trust"] = cmd_domain_trust() + subcommands["tombstones"] = cmd_domain_tombstones() + subcommands["schemaupgrade"] = cmd_domain_schema_upgrade() + subcommands["functionalprep"] = cmd_domain_functional_prep() + subcommands["backup"] = cmd_domain_backup() diff --git a/python/samba/netcmd/domain/auth/__init__.py b/python/samba/netcmd/domain/auth/__init__.py new file mode 100644 index 0000000..fd74f3e --- /dev/null +++ b/python/samba/netcmd/domain/auth/__init__.py @@ -0,0 +1,35 @@ +# Unix SMB/CIFS implementation. +# +# authentication silos +# +# Copyright (C) Catalyst.Net Ltd. 2023 +# +# Written by Rob van der Linde +# +# This program is free software; you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation; either version 3 of the License, or +# (at your option) any later version. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with this program. If not, see . 
+# + +from samba.netcmd import SuperCommand + +from .policy import cmd_domain_auth_policy +from .silo import cmd_domain_auth_silo + + +class cmd_domain_auth(SuperCommand): + """Manage authentication silos and policies on the domain.""" + + subcommands = { + "policy": cmd_domain_auth_policy(), + "silo": cmd_domain_auth_silo(), + } diff --git a/python/samba/netcmd/domain/auth/policy.py b/python/samba/netcmd/domain/auth/policy.py new file mode 100644 index 0000000..de9ce4b --- /dev/null +++ b/python/samba/netcmd/domain/auth/policy.py @@ -0,0 +1,685 @@ +# Unix SMB/CIFS implementation. +# +# authentication silos - authentication policy management +# +# Copyright (C) Catalyst.Net Ltd. 2023 +# +# Written by Rob van der Linde +# +# This program is free software; you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation; either version 3 of the License, or +# (at your option) any later version. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with this program. If not, see . +# + +import samba.getopt as options +from samba.netcmd import Command, CommandError, Option, SuperCommand +from samba.netcmd.domain.models import AuthenticationPolicy,\ + AuthenticationSilo, Group +from samba.netcmd.domain.models.auth_policy import MIN_TGT_LIFETIME,\ + MAX_TGT_LIFETIME, StrongNTLMPolicy +from samba.netcmd.domain.models.exceptions import ModelError +from samba.netcmd.validators import Range + + +def check_similar_args(option, args): + """Helper method for checking similar mutually exclusive args. 
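+
+    Raises a CommandError if more than one of the given alternatives
+    is supplied.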
+ + Example: --user-allowed-to-authenticate-from and + --user-allowed-to-authenticate-from-device-silo + """ + num = sum(arg is not None for arg in args) + if num > 1: + raise CommandError(f"{option} argument repeated {num} times.") + + +class UserOptions(options.OptionGroup): + """User options used by policy create and policy modify commands.""" + + def __init__(self, parser): + super().__init__(parser, "User Options") + + self.add_option("--user-tgt-lifetime-mins", + help="Ticket-Granting-Ticket lifetime for user accounts.", + dest="tgt_lifetime", type=int, action="callback", + callback=self.set_option, + validators=[Range(min=MIN_TGT_LIFETIME, max=MAX_TGT_LIFETIME)]) + self.add_option("--user-allow-ntlm-auth", + help="Allow NTLM network authentication despite the fact that the user " + "is restricted to selected devices.", + dest="allow_ntlm_auth", default=False, + action="callback", callback=self.set_option) + self.add_option("--user-allowed-to-authenticate-from", + help="SDDL Rules setting which device the user is allowed to authenticate from.", + type=str, dest="allowed_to_authenticate_from", + action="callback", callback=self.set_option, + metavar="SDDL") + self.add_option("--user-allowed-to-authenticate-from-device-silo", + help="To authenticate, the user must log in from a device in SILO.", + type=str, dest="allowed_to_authenticate_from_device_silo", + action="callback", callback=self.set_option, + metavar="SILO") + self.add_option("--user-allowed-to-authenticate-from-device-group", + help="To authenticate, the user must log in from a device in GROUP.", + type=str, dest="allowed_to_authenticate_from_device_group", + action="callback", callback=self.set_option, + metavar="GROUP") + self.add_option("--user-allowed-to-authenticate-to", + help="A target service, on a user account, requires the connecting user to match SDDL", + type=str, dest="allowed_to_authenticate_to", + action="callback", callback=self.set_option, + metavar="SDDL") + self.add_option("--user-allowed-to-authenticate-to-by-group", + help="A target service, on a user account, requires the connecting user to be in GROUP", + type=str, dest="allowed_to_authenticate_to_by_group", + action="callback", callback=self.set_option, + metavar="GROUP") + self.add_option("--user-allowed-to-authenticate-to-by-silo", + help="A target service, on a user account, requires the connecting user to be in SILO", + type=str, dest="allowed_to_authenticate_to_by_silo", + action="callback", callback=self.set_option, + metavar="SILO") + + +class ServiceOptions(options.OptionGroup): + """Service options used by policy create and policy modify commands.""" + + def __init__(self, parser): + super().__init__(parser, "Service Options") + + self.add_option("--service-tgt-lifetime-mins", + help="Ticket-Granting-Ticket lifetime for service accounts.", + dest="tgt_lifetime", type=int, action="callback", + callback=self.set_option, + validators=[Range(min=MIN_TGT_LIFETIME, max=MAX_TGT_LIFETIME)]) + self.add_option("--service-allow-ntlm-auth", + help="Allow NTLM network authentication despite " + "the fact that the service account " + "is restricted to selected devices.", + dest="allow_ntlm_auth", default=False, + action="callback", callback=self.set_option) + self.add_option("--service-allowed-to-authenticate-from", + help="SDDL Rules setting which device the " + "service account is allowed to authenticate from.", + type=str, dest="allowed_to_authenticate_from", + action="callback", callback=self.set_option, + metavar="SDDL") + 
self.add_option("--service-allowed-to-authenticate-from-device-silo", + help="To authenticate, the service must authenticate on a device in SILO.", + type=str, dest="allowed_to_authenticate_from_device_silo", + action="callback", callback=self.set_option, + metavar="SILO") + self.add_option("--service-allowed-to-authenticate-from-device-group", + help="To authenticate, the service must authenticate on a device in GROUP.", + type=str, dest="allowed_to_authenticate_from_device_group", + action="callback", callback=self.set_option, + metavar="GROUP") + self.add_option("--service-allowed-to-authenticate-to", + help="The target service requires the connecting user to match SDDL", + type=str, dest="allowed_to_authenticate_to", + action="callback", callback=self.set_option, + metavar="SDDL") + self.add_option("--service-allowed-to-authenticate-to-by-group", + help="The target service requires the connecting user to be in GROUP", + type=str, dest="allowed_to_authenticate_to_by_group", + action="callback", callback=self.set_option, + metavar="GROUP") + self.add_option("--service-allowed-to-authenticate-to-by-silo", + help="The target service requires the connecting user to be in SILO", + type=str, dest="allowed_to_authenticate_to_by_silo", + action="callback", callback=self.set_option, + metavar="SILO") + + +class ComputerOptions(options.OptionGroup): + """Computer options used by policy create and policy modify commands.""" + + def __init__(self, parser): + super().__init__(parser, "Computer Options") + + self.add_option("--computer-tgt-lifetime-mins", + help="Ticket-Granting-Ticket lifetime for computer accounts.", + dest="tgt_lifetime", type=int, action="callback", + callback=self.set_option, + validators=[Range(min=MIN_TGT_LIFETIME, max=MAX_TGT_LIFETIME)]) + self.add_option("--computer-allowed-to-authenticate-to", + help="The computer account (server, workstation) service requires the connecting user to match SDDL", + type=str, dest="allowed_to_authenticate_to", + action="callback", callback=self.set_option, + metavar="SDDL") + self.add_option("--computer-allowed-to-authenticate-to-by-group", + help="The computer account (server, workstation) service requires the connecting user to be in GROUP", + type=str, dest="allowed_to_authenticate_to_by_group", + action="callback", callback=self.set_option, + metavar="GROUP") + self.add_option("--computer-allowed-to-authenticate-to-by-silo", + help="The computer account (server, workstation) service requires the connecting user to be in SILO", + type=str, dest="allowed_to_authenticate_to_by_silo", + action="callback", callback=self.set_option, + metavar="SILO") + + +class cmd_domain_auth_policy_list(Command): + """List authentication policies on the domain.""" + + synopsis = "%prog -H [options]" + + takes_optiongroups = { + "sambaopts": options.SambaOptions, + "credopts": options.CredentialsOptions, + "hostopts": options.HostOptions, + } + + takes_options = [ + Option("--json", help="Output results in JSON format.", + dest="output_format", action="store_const", const="json"), + ] + + def run(self, hostopts=None, sambaopts=None, credopts=None, + output_format=None): + + ldb = self.ldb_connect(hostopts, sambaopts, credopts) + + # Authentication policies grouped by cn. + try: + policies = {policy.cn: policy.as_dict() + for policy in AuthenticationPolicy.query(ldb)} + except ModelError as e: + raise CommandError(e) + + # Using json output format gives more detail. 
+ if output_format == "json": + self.print_json(policies) + else: + for policy in policies.keys(): + self.outf.write(f"{policy}\n") + + +class cmd_domain_auth_policy_view(Command): + """View an authentication policy on the domain.""" + + synopsis = "%prog -H [options]" + + takes_optiongroups = { + "sambaopts": options.SambaOptions, + "credopts": options.CredentialsOptions, + "hostopts": options.HostOptions, + } + + takes_options = [ + Option("--name", + help="Name of authentication policy to view (required).", + dest="name", action="store", type=str, required=True), + ] + + def run(self, hostopts=None, sambaopts=None, credopts=None, name=None): + + ldb = self.ldb_connect(hostopts, sambaopts, credopts) + + try: + policy = AuthenticationPolicy.get(ldb, cn=name) + except ModelError as e: + raise CommandError(e) + + # Check if authentication policy exists first. + if policy is None: + raise CommandError(f"Authentication policy {name} not found.") + + # Display policy as JSON. + self.print_json(policy.as_dict()) + + +class cmd_domain_auth_policy_create(Command): + """Create an authentication policy on the domain.""" + + synopsis = "%prog -H [options]" + + takes_optiongroups = { + "sambaopts": options.SambaOptions, + "credopts": options.CredentialsOptions, + "hostopts": options.HostOptions, + "useropts": UserOptions, + "serviceopts": ServiceOptions, + "computeropts": ComputerOptions, + } + + takes_options = [ + Option("--name", help="Name of authentication policy (required).", + dest="name", action="store", type=str, required=True), + Option("--description", + help="Optional description for authentication policy.", + dest="description", action="store", type=str), + Option("--protect", + help="Protect authentication silo from accidental deletion.", + dest="protect", action="store_true"), + Option("--unprotect", + help="Unprotect authentication silo from accidental deletion.", + dest="unprotect", action="store_true"), + Option("--audit", + help="Only audit authentication policy.", + dest="audit", action="store_true"), + Option("--enforce", + help="Enforce authentication policy.", + dest="enforce", action="store_true"), + Option("--strong-ntlm-policy", + help=f"Strong NTLM Policy ({StrongNTLMPolicy.choices_str()}).", + dest="strong_ntlm_policy", type="choice", action="store", + choices=StrongNTLMPolicy.get_choices(), + default="Disabled"), + ] + + def run(self, hostopts=None, sambaopts=None, credopts=None, useropts=None, + serviceopts=None, computeropts=None, name=None, description=None, + protect=None, unprotect=None, audit=None, enforce=None, + strong_ntlm_policy=None): + + if protect and unprotect: + raise CommandError("--protect and --unprotect cannot be used together.") + if audit and enforce: + raise CommandError("--audit and --enforce cannot be used together.") + + # Check for repeated, similar arguments. 
+ check_similar_args("--user-allowed-to-authenticate-from", + [useropts.allowed_to_authenticate_from, + useropts.allowed_to_authenticate_from_device_group, + useropts.allowed_to_authenticate_from_device_silo]) + check_similar_args("--user-allowed-to-authenticate-to", + [useropts.allowed_to_authenticate_to, + useropts.allowed_to_authenticate_to_by_group, + useropts.allowed_to_authenticate_to_by_silo]) + check_similar_args("--service-allowed-to-authenticate-from", + [serviceopts.allowed_to_authenticate_from, + serviceopts.allowed_to_authenticate_from_device_group, + serviceopts.allowed_to_authenticate_from_device_silo]) + check_similar_args("--service-allowed-to-authenticate-to", + [serviceopts.allowed_to_authenticate_to, + serviceopts.allowed_to_authenticate_to_by_group, + serviceopts.allowed_to_authenticate_to_by_silo]) + check_similar_args("--computer-allowed-to-authenticate-to", + [computeropts.allowed_to_authenticate_to, + computeropts.allowed_to_authenticate_to_by_group, + computeropts.allowed_to_authenticate_to_by_silo]) + + ldb = self.ldb_connect(hostopts, sambaopts, credopts) + + # Generate SDDL for authenticating users from a device in a group + if useropts.allowed_to_authenticate_from_device_group: + group = Group.get( + ldb, cn=useropts.allowed_to_authenticate_from_device_group) + useropts.allowed_to_authenticate_from = group.get_authentication_sddl() + + # Generate SDDL for authenticating users from a device in a silo + if useropts.allowed_to_authenticate_from_device_silo: + silo = AuthenticationSilo.get( + ldb, cn=useropts.allowed_to_authenticate_from_device_silo) + useropts.allowed_to_authenticate_from = silo.get_authentication_sddl() + + # Generate SDDL for authenticating user accounts to a group + if useropts.allowed_to_authenticate_to_by_group: + group = Group.get( + ldb, cn=useropts.allowed_to_authenticate_to_by_group) + useropts.allowed_to_authenticate_to = group.get_authentication_sddl() + + # Generate SDDL for authenticating user accounts to a silo + if useropts.allowed_to_authenticate_to_by_silo: + silo = AuthenticationSilo.get( + ldb, cn=useropts.allowed_to_authenticate_to_by_silo) + useropts.allowed_to_authenticate_to = silo.get_authentication_sddl() + + # Generate SDDL for authenticating service accounts from a device in a group + if serviceopts.allowed_to_authenticate_from_device_group: + group = Group.get( + ldb, cn=serviceopts.allowed_to_authenticate_from_device_group) + serviceopts.allowed_to_authenticate_from = group.get_authentication_sddl() + + # Generate SDDL for authenticating service accounts from a device in a silo + if serviceopts.allowed_to_authenticate_from_device_silo: + silo = AuthenticationSilo.get( + ldb, cn=serviceopts.allowed_to_authenticate_from_device_silo) + serviceopts.allowed_to_authenticate_from = silo.get_authentication_sddl() + + # Generate SDDL for authenticating service accounts to a group + if serviceopts.allowed_to_authenticate_to_by_group: + group = Group.get( + ldb, cn=serviceopts.allowed_to_authenticate_to_by_group) + serviceopts.allowed_to_authenticate_to = group.get_authentication_sddl() + + # Generate SDDL for authenticating service accounts to a silo + if serviceopts.allowed_to_authenticate_to_by_silo: + silo = AuthenticationSilo.get( + ldb, cn=serviceopts.allowed_to_authenticate_to_by_silo) + serviceopts.allowed_to_authenticate_to = silo.get_authentication_sddl() + + # Generate SDDL for authenticating computer accounts to a group + if computeropts.allowed_to_authenticate_to_by_group: + group = Group.get( + ldb, 
cn=computeropts.allowed_to_authenticate_to_by_group) + computeropts.allowed_to_authenticate_to = group.get_authentication_sddl() + + # Generate SDDL for authenticating computer accounts to a silo + if computeropts.allowed_to_authenticate_to_by_silo: + silo = AuthenticationSilo.get( + ldb, cn=computeropts.allowed_to_authenticate_to_by_silo) + computeropts.allowed_to_authenticate_to = silo.get_authentication_sddl() + + try: + policy = AuthenticationPolicy.get(ldb, cn=name) + except ModelError as e: + raise CommandError(e) + + # Make sure authentication policy doesn't already exist. + if policy is not None: + raise CommandError(f"Authentication policy {name} already exists.") + + # New policy object. + policy = AuthenticationPolicy( + cn=name, + description=description, + strong_ntlm_policy=StrongNTLMPolicy[strong_ntlm_policy.upper()], + user_allow_ntlm_auth=useropts.allow_ntlm_auth, + user_tgt_lifetime=useropts.tgt_lifetime, + user_allowed_to_authenticate_from=useropts.allowed_to_authenticate_from, + user_allowed_to_authenticate_to=useropts.allowed_to_authenticate_to, + service_allow_ntlm_auth=serviceopts.allow_ntlm_auth, + service_tgt_lifetime=serviceopts.tgt_lifetime, + service_allowed_to_authenticate_from=serviceopts.allowed_to_authenticate_from, + service_allowed_to_authenticate_to=serviceopts.allowed_to_authenticate_to, + computer_tgt_lifetime=computeropts.tgt_lifetime, + computer_allowed_to_authenticate_to=computeropts.allowed_to_authenticate_to, + ) + + # Either --enforce will be set or --audit but never both. + # The default if both are missing is enforce=True. + if enforce is not None: + policy.enforced = enforce + else: + policy.enforced = not audit + + # Create policy. + try: + policy.save(ldb) + + if protect: + policy.protect(ldb) + except ModelError as e: + raise CommandError(e) + + # Authentication policy created successfully. 
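+        # (Editorial sketch of a typical invocation, assuming this command is
+        # exposed as 'samba-tool domain auth policy create' and a DC at
+        # ldap://dc1.example.com:
+        #
+        #   samba-tool domain auth policy create -H ldap://dc1.example.com \
+        #       --name "Developers" --strong-ntlm-policy Disabled --enforce
+        # )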
+ self.outf.write(f"Created authentication policy: {name}\n") + + +class cmd_domain_auth_policy_modify(Command): + """Modify authentication policies on the domain.""" + + synopsis = "%prog -H [options]" + + takes_optiongroups = { + "sambaopts": options.SambaOptions, + "credopts": options.CredentialsOptions, + "hostopts": options.HostOptions, + "useropts": UserOptions, + "serviceopts": ServiceOptions, + "computeropts": ComputerOptions, + } + + takes_options = [ + Option("--name", help="Name of authentication policy (required).", + dest="name", action="store", type=str, required=True), + Option("--description", + help="Optional description for authentication policy.", + dest="description", action="store", type=str), + Option("--protect", + help="Protect authentication silo from accidental deletion.", + dest="protect", action="store_true"), + Option("--unprotect", + help="Unprotect authentication silo from accidental deletion.", + dest="unprotect", action="store_true"), + Option("--audit", + help="Only audit authentication policy.", + dest="audit", action="store_true"), + Option("--enforce", + help="Enforce authentication policy.", + dest="enforce", action="store_true"), + Option("--strong-ntlm-policy", + help=f"Strong NTLM Policy ({StrongNTLMPolicy.choices_str()}).", + dest="strong_ntlm_policy", type="choice", action="store", + choices=StrongNTLMPolicy.get_choices()), + ] + + def run(self, hostopts=None, sambaopts=None, credopts=None, useropts=None, + serviceopts=None, computeropts=None, name=None, description=None, + protect=None, unprotect=None, audit=None, enforce=None, + strong_ntlm_policy=None): + + if protect and unprotect: + raise CommandError("--protect and --unprotect cannot be used together.") + if audit and enforce: + raise CommandError("--audit and --enforce cannot be used together.") + + # Check for repeated, similar arguments. 
+ check_similar_args("--user-allowed-to-authenticate-from", + [useropts.allowed_to_authenticate_from, + useropts.allowed_to_authenticate_from_device_group, + useropts.allowed_to_authenticate_from_device_silo]) + check_similar_args("--user-allowed-to-authenticate-to", + [useropts.allowed_to_authenticate_to, + useropts.allowed_to_authenticate_to_by_group, + useropts.allowed_to_authenticate_to_by_silo]) + check_similar_args("--service-allowed-to-authenticate-from", + [serviceopts.allowed_to_authenticate_from, + serviceopts.allowed_to_authenticate_from_device_group, + serviceopts.allowed_to_authenticate_from_device_silo]) + check_similar_args("--service-allowed-to-authenticate-to", + [serviceopts.allowed_to_authenticate_to, + serviceopts.allowed_to_authenticate_to_by_group, + serviceopts.allowed_to_authenticate_to_by_silo]) + check_similar_args("--computer-allowed-to-authenticate-to", + [computeropts.allowed_to_authenticate_to, + computeropts.allowed_to_authenticate_to_by_group, + computeropts.allowed_to_authenticate_to_by_silo]) + + ldb = self.ldb_connect(hostopts, sambaopts, credopts) + + # Generate SDDL for authenticating users from a device in a group + if useropts.allowed_to_authenticate_from_device_group: + group = Group.get( + ldb, cn=useropts.allowed_to_authenticate_from_device_group) + useropts.allowed_to_authenticate_from = group.get_authentication_sddl() + + # Generate SDDL for authenticating users from a device in a silo + if useropts.allowed_to_authenticate_from_device_silo: + silo = AuthenticationSilo.get( + ldb, cn=useropts.allowed_to_authenticate_from_device_silo) + useropts.allowed_to_authenticate_from = silo.get_authentication_sddl() + + # Generate SDDL for authenticating user accounts to a group + if useropts.allowed_to_authenticate_to_by_group: + group = Group.get( + ldb, cn=useropts.allowed_to_authenticate_to_by_group) + useropts.allowed_to_authenticate_to = group.get_authentication_sddl() + + # Generate SDDL for authenticating user accounts to a silo + if useropts.allowed_to_authenticate_to_by_silo: + silo = AuthenticationSilo.get( + ldb, cn=useropts.allowed_to_authenticate_to_by_silo) + useropts.allowed_to_authenticate_to = silo.get_authentication_sddl() + + # Generate SDDL for authenticating users from a device a device in a group + if serviceopts.allowed_to_authenticate_from_device_group: + group = Group.get( + ldb, cn=serviceopts.allowed_to_authenticate_from_device_group) + serviceopts.allowed_to_authenticate_from = group.get_authentication_sddl() + + # Generate SDDL for authenticating service accounts from a device in a silo + if serviceopts.allowed_to_authenticate_from_device_silo: + silo = AuthenticationSilo.get( + ldb, cn=serviceopts.allowed_to_authenticate_from_device_silo) + serviceopts.allowed_to_authenticate_from = silo.get_authentication_sddl() + + # Generate SDDL for authenticating service accounts to a group + if serviceopts.allowed_to_authenticate_to_by_group: + group = Group.get( + ldb, cn=serviceopts.allowed_to_authenticate_to_by_group) + serviceopts.allowed_to_authenticate_to = group.get_authentication_sddl() + + # Generate SDDL for authenticating service accounts to a silo + if serviceopts.allowed_to_authenticate_to_by_silo: + silo = AuthenticationSilo.get( + ldb, cn=serviceopts.allowed_to_authenticate_to_by_silo) + serviceopts.allowed_to_authenticate_to = silo.get_authentication_sddl() + + # Generate SDDL for authenticating computer accounts to a group + if computeropts.allowed_to_authenticate_to_by_group: + group = Group.get( + ldb, 
cn=computeropts.allowed_to_authenticate_to_by_group) + computeropts.allowed_to_authenticate_to = group.get_authentication_sddl() + + # Generate SDDL for authenticating computer accounts to a silo + if computeropts.allowed_to_authenticate_to_by_silo: + silo = AuthenticationSilo.get( + ldb, cn=computeropts.allowed_to_authenticate_to_by_silo) + computeropts.allowed_to_authenticate_to = silo.get_authentication_sddl() + + try: + policy = AuthenticationPolicy.get(ldb, cn=name) + except ModelError as e: + raise CommandError(e) + + # Check if authentication policy exists. + if policy is None: + raise CommandError(f"Authentication policy {name} not found.") + + # Either --enforce will be set or --audit but never both. + if enforce: + policy.enforced = True + elif audit: + policy.enforced = False + + # Update the description. + if description is not None: + policy.description = description + + # User sign on + ############### + + if strong_ntlm_policy is not None: + policy.strong_ntlm_policy = \ + StrongNTLMPolicy[strong_ntlm_policy.upper()] + + if useropts.tgt_lifetime is not None: + policy.user_tgt_lifetime = useropts.tgt_lifetime + + if useropts.allowed_to_authenticate_from is not None: + policy.user_allowed_to_authenticate_from = \ + useropts.allowed_to_authenticate_from + + if useropts.allowed_to_authenticate_to is not None: + policy.user_allowed_to_authenticate_to = \ + useropts.allowed_to_authenticate_to + + # Service sign on + ################## + + if serviceopts.tgt_lifetime is not None: + policy.service_tgt_lifetime = serviceopts.tgt_lifetime + + if serviceopts.allowed_to_authenticate_from is not None: + policy.service_allowed_to_authenticate_from = \ + serviceopts.allowed_to_authenticate_from + + if serviceopts.allowed_to_authenticate_to is not None: + policy.service_allowed_to_authenticate_to = \ + serviceopts.allowed_to_authenticate_to + + # Computer + ########### + + if computeropts.tgt_lifetime is not None: + policy.computer_tgt_lifetime = computeropts.tgt_lifetime + + if computeropts.allowed_to_authenticate_to is not None: + policy.computer_allowed_to_authenticate_to = \ + computeropts.allowed_to_authenticate_to + + # Update policy. + try: + policy.save(ldb) + + if protect: + policy.protect(ldb) + elif unprotect: + policy.unprotect(ldb) + except ModelError as e: + raise CommandError(e) + + # Authentication policy updated successfully. + self.outf.write(f"Updated authentication policy: {name}\n") + + +class cmd_domain_auth_policy_delete(Command): + """Delete authentication policies on the domain.""" + + synopsis = "%prog -H [options]" + + takes_optiongroups = { + "sambaopts": options.SambaOptions, + "credopts": options.CredentialsOptions, + "hostopts": options.HostOptions, + } + + takes_options = [ + Option("--name", help="Name of authentication policy (required).", + dest="name", action="store", type=str, required=True), + Option("--force", help="Force delete protected authentication policy.", + dest="force", action="store_true") + ] + + def run(self, hostopts=None, sambaopts=None, credopts=None, name=None, + force=None): + + ldb = self.ldb_connect(hostopts, sambaopts, credopts) + + try: + policy = AuthenticationPolicy.get(ldb, cn=name) + except ModelError as e: + raise CommandError(e) + + # Check if authentication policy exists first. + if policy is None: + raise CommandError(f"Authentication policy {name} not found.") + + # Delete item, --force removes delete protection first. 
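+        # (Editorial note: deleting a policy created with --protect is
+        # expected to fail with ModelError until the protection is removed,
+        # which is what --force does via policy.unprotect(ldb) below.)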
+ try: + if force: + policy.unprotect(ldb) + + policy.delete(ldb) + except ModelError as e: + if not force: + raise CommandError( + f"{e}\nTry --force to delete protected authentication policies.") + else: + raise CommandError(e) + + # Authentication policy deleted successfully. + self.outf.write(f"Deleted authentication policy: {name}\n") + + +class cmd_domain_auth_policy(SuperCommand): + """Manage authentication policies on the domain.""" + + subcommands = { + "list": cmd_domain_auth_policy_list(), + "view": cmd_domain_auth_policy_view(), + "create": cmd_domain_auth_policy_create(), + "modify": cmd_domain_auth_policy_modify(), + "delete": cmd_domain_auth_policy_delete(), + } diff --git a/python/samba/netcmd/domain/auth/silo.py b/python/samba/netcmd/domain/auth/silo.py new file mode 100644 index 0000000..2e27761 --- /dev/null +++ b/python/samba/netcmd/domain/auth/silo.py @@ -0,0 +1,402 @@ +# Unix SMB/CIFS implementation. +# +# authentication silos - authentication silo management +# +# Copyright (C) Catalyst.Net Ltd. 2023 +# +# Written by Rob van der Linde +# +# This program is free software; you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation; either version 3 of the License, or +# (at your option) any later version. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with this program. If not, see . +# + +import samba.getopt as options +from samba.netcmd import Command, CommandError, Option, SuperCommand +from samba.netcmd.domain.models import AuthenticationPolicy, AuthenticationSilo +from samba.netcmd.domain.models.exceptions import ModelError + +from .silo_member import cmd_domain_auth_silo_member + + +class cmd_domain_auth_silo_list(Command): + """List authentication silos on the domain.""" + + synopsis = "%prog -H [options]" + + takes_optiongroups = { + "sambaopts": options.SambaOptions, + "credopts": options.CredentialsOptions, + "hostopts": options.HostOptions, + } + + takes_options = [ + Option("--json", help="Output results in JSON format.", + dest="output_format", action="store_const", const="json"), + ] + + def run(self, hostopts=None, sambaopts=None, credopts=None, + output_format=None): + + ldb = self.ldb_connect(hostopts, sambaopts, credopts) + + # Authentication silos grouped by cn. + try: + silos = {silo.cn: silo.as_dict() + for silo in AuthenticationSilo.query(ldb)} + except ModelError as e: + raise CommandError(e) + + # Using json output format gives more detail. 
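+        # (Editorial sketch: plain output prints one silo cn per line, while
+        # --json prints the full dictionary built above, e.g.
+        #
+        #   {"Finance Silo": {"cn": "Finance Silo", "description": "..."}}
+        # )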
+ if output_format == "json": + self.print_json(silos) + else: + for silo in silos.keys(): + self.outf.write(f"{silo}\n") + + +class cmd_domain_auth_silo_view(Command): + """View an authentication silo on the domain.""" + + synopsis = "%prog -H [options]" + + takes_optiongroups = { + "sambaopts": options.SambaOptions, + "credopts": options.CredentialsOptions, + "hostopts": options.HostOptions, + } + + takes_options = [ + Option("--name", + help="Name of authentication silo to view (required).", + dest="name", action="store", type=str, required=True), + ] + + def run(self, hostopts=None, sambaopts=None, credopts=None, name=None): + + ldb = self.ldb_connect(hostopts, sambaopts, credopts) + + try: + silo = AuthenticationSilo.get(ldb, cn=name) + except ModelError as e: + raise CommandError(e) + + # Check if silo exists first. + if silo is None: + raise CommandError(f"Authentication silo {name} not found.") + + # Display silo as JSON. + self.print_json(silo.as_dict()) + + +class cmd_domain_auth_silo_create(Command): + """Create a new authentication silo on the domain.""" + + synopsis = "%prog -H [options]" + + takes_optiongroups = { + "sambaopts": options.SambaOptions, + "credopts": options.CredentialsOptions, + "hostopts": options.HostOptions, + } + + takes_options = [ + Option("--name", help="Name of authentication silo (required).", + dest="name", action="store", type=str, required=True), + Option("--description", + help="Optional description for authentication silo.", + dest="description", action="store", type=str), + Option("--user-authentication-policy", + help="User account authentication policy.", + dest="user_authentication_policy", action="store", type=str, + metavar="USER_POLICY"), + Option("--service-authentication-policy", + help="Managed service account authentication policy.", + dest="service_authentication_policy", action="store", type=str, + metavar="SERVICE_POLICY"), + Option("--computer-authentication-policy", + help="Computer authentication policy.", + dest="computer_authentication_policy", action="store", type=str, + metavar="COMPUTER_POLICY"), + Option("--protect", + help="Protect authentication silo from accidental deletion.", + dest="protect", action="store_true"), + Option("--unprotect", + help="Unprotect authentication silo from accidental deletion.", + dest="unprotect", action="store_true"), + Option("--audit", + help="Only audit silo policies.", + dest="audit", action="store_true"), + Option("--enforce", + help="Enforce silo policies.", + dest="enforce", action="store_true") + ] + + @staticmethod + def get_policy(ldb, name): + """Helper function to fetch auth policy or raise CommandError. + + :param ldb: Ldb connection + :param name: Either the DN or name of authentication policy + """ + try: + return AuthenticationPolicy.lookup(ldb, name) + except (LookupError, ValueError) as e: + raise CommandError(e) + + def run(self, hostopts=None, sambaopts=None, credopts=None, + name=None, description=None, + user_authentication_policy=None, + service_authentication_policy=None, + computer_authentication_policy=None, + protect=None, unprotect=None, + audit=None, enforce=None): + + if protect and unprotect: + raise CommandError("--protect and --unprotect cannot be used together.") + if audit and enforce: + raise CommandError("--audit and --enforce cannot be used together.") + + ldb = self.ldb_connect(hostopts, sambaopts, credopts) + + try: + silo = AuthenticationSilo.get(ldb, cn=name) + except ModelError as e: + raise CommandError(e) + + # Make sure silo doesn't already exist. 
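+        # (Editorial note: AuthenticationSilo.get() returns None for a
+        # missing silo, while the get_policy() helper above uses .lookup(),
+        # which is assumed to raise LookupError instead of returning None;
+        # hence the explicit None check here.)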
+ if silo is not None: + raise CommandError(f"Authentication silo {name} already exists.") + + # New silo object. + silo = AuthenticationSilo(cn=name, description=description) + + # Set user policy + if user_authentication_policy: + silo.user_authentication_policy = \ + self.get_policy(ldb, user_authentication_policy).dn + + # Set service policy + if service_authentication_policy: + silo.service_authentication_policy = \ + self.get_policy(ldb, service_authentication_policy).dn + + # Set computer policy + if computer_authentication_policy: + silo.computer_authentication_policy = \ + self.get_policy(ldb, computer_authentication_policy).dn + + # Either --enforce will be set or --audit but never both. + # The default if both are missing is enforce=True. + if enforce is not None: + silo.enforced = enforce + else: + silo.enforced = not audit + + # Create silo + try: + silo.save(ldb) + + if protect: + silo.protect(ldb) + except ModelError as e: + raise CommandError(e) + + # Authentication silo created successfully. + self.outf.write(f"Created authentication silo: {name}\n") + + +class cmd_domain_auth_silo_modify(Command): + """Modify an authentication silo on the domain.""" + + synopsis = "%prog -H [options]" + + takes_optiongroups = { + "sambaopts": options.SambaOptions, + "credopts": options.CredentialsOptions, + "hostopts": options.HostOptions, + } + + takes_options = [ + Option("--name", help="Name of authentication silo (required).", + dest="name", action="store", type=str, required=True), + Option("--description", + help="Optional description for authentication silo.", + dest="description", action="store", type=str), + Option("--user-authentication-policy", + help="User account authentication policy.", + dest="user_authentication_policy", action="store", type=str, + metavar="USER_POLICY"), + Option("--service-authentication-policy", + help="Managed service account authentication policy.", + dest="service_authentication_policy", action="store", type=str, + metavar="SERVICE_POLICY"), + Option("--computer-authentication-policy", + help="Computer authentication policy.", + dest="computer_authentication_policy", action="store", type=str, + metavar="COMPUTER_POLICY"), + Option("--protect", + help="Protect authentication silo from accidental deletion.", + dest="protect", action="store_true"), + Option("--unprotect", + help="Unprotect authentication silo from accidental deletion.", + dest="unprotect", action="store_true"), + Option("--audit", + help="Only audit silo policies.", + dest="audit", action="store_true"), + Option("--enforce", + help="Enforce silo policies.", + dest="enforce", action="store_true") + ] + + @staticmethod + def get_policy(ldb, name): + """Helper function to fetch auth policy or raise CommandError. 
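+
+        Editorial sketch, assuming both a name and a DN are accepted
+        (per the :param name: description below):
+
+            policy = self.get_policy(ldb, "User Policy")
+            policy = self.get_policy(ldb, "CN=User Policy,...")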
+ + :param ldb: Ldb connection + :param name: Either the DN or name of authentication policy + """ + try: + return AuthenticationPolicy.lookup(ldb, name) + except (LookupError, ModelError, ValueError) as e: + raise CommandError(e) + + def run(self, hostopts=None, sambaopts=None, credopts=None, + name=None, description=None, + user_authentication_policy=None, + service_authentication_policy=None, + computer_authentication_policy=None, + protect=None, unprotect=None, + audit=None, enforce=None): + + if audit and enforce: + raise CommandError("--audit and --enforce cannot be used together.") + if protect and unprotect: + raise CommandError("--protect and --unprotect cannot be used together.") + + ldb = self.ldb_connect(hostopts, sambaopts, credopts) + + try: + silo = AuthenticationSilo.get(ldb, cn=name) + except ModelError as e: + raise CommandError(e) + + # Check if silo exists first. + if silo is None: + raise CommandError(f"Authentication silo {name} not found.") + + # Either --enforce will be set or --audit but never both. + if enforce: + silo.enforced = True + elif audit: + silo.enforced = False + + # Update the description. + if description is not None: + silo.description = description + + # Set or unset user policy. + if user_authentication_policy == "": + silo.user_authentication_policy = None + elif user_authentication_policy: + silo.user_authentication_policy = \ + self.get_policy(ldb, user_authentication_policy).dn + + # Set or unset service policy. + if service_authentication_policy == "": + silo.service_authentication_policy = None + elif service_authentication_policy: + silo.service_authentication_policy = \ + self.get_policy(ldb, service_authentication_policy).dn + + # Set or unset computer policy. + if computer_authentication_policy == "": + silo.computer_authentication_policy = None + elif computer_authentication_policy: + silo.computer_authentication_policy = \ + self.get_policy(ldb, computer_authentication_policy).dn + + # Update silo + try: + silo.save(ldb) + + if protect: + silo.protect(ldb) + elif unprotect: + silo.unprotect(ldb) + except ModelError as e: + raise CommandError(e) + + # Silo updated successfully. + self.outf.write(f"Updated authentication silo: {name}\n") + + +class cmd_domain_auth_silo_delete(Command): + """Delete an authentication silo on the domain.""" + + synopsis = "%prog -H [options]" + + takes_optiongroups = { + "sambaopts": options.SambaOptions, + "credopts": options.CredentialsOptions, + "hostopts": options.HostOptions, + } + + takes_options = [ + Option("--name", help="Name of authentication silo (required).", + dest="name", action="store", type=str, required=True), + Option("--force", help="Force delete protected authentication silo.", + dest="force", action="store_true") + ] + + def run(self, hostopts=None, sambaopts=None, credopts=None, name=None, + force=None): + + ldb = self.ldb_connect(hostopts, sambaopts, credopts) + + try: + silo = AuthenticationSilo.get(ldb, cn=name) + except ModelError as e: + raise CommandError(e) + + # Check if silo exists first. + if silo is None: + raise CommandError(f"Authentication silo {name} not found.") + + # Delete silo + try: + if force: + silo.unprotect(ldb) + + silo.delete(ldb) + except ModelError as e: + if not force: + raise CommandError( + f"{e}\nTry --force to delete protected authentication silos.") + else: + raise CommandError(e) + + # Authentication silo deleted successfully. 
+ self.outf.write(f"Deleted authentication silo: {name}\n") + + +class cmd_domain_auth_silo(SuperCommand): + """Manage authentication silos on the domain.""" + + subcommands = { + "list": cmd_domain_auth_silo_list(), + "view": cmd_domain_auth_silo_view(), + "create": cmd_domain_auth_silo_create(), + "modify": cmd_domain_auth_silo_modify(), + "delete": cmd_domain_auth_silo_delete(), + "member": cmd_domain_auth_silo_member(), + } diff --git a/python/samba/netcmd/domain/auth/silo_member.py b/python/samba/netcmd/domain/auth/silo_member.py new file mode 100644 index 0000000..9b41400 --- /dev/null +++ b/python/samba/netcmd/domain/auth/silo_member.py @@ -0,0 +1,201 @@ +# Unix SMB/CIFS implementation. +# +# authentication silos - silo member management +# +# Copyright (C) Catalyst.Net Ltd. 2023 +# +# Written by Rob van der Linde +# +# This program is free software; you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation; either version 3 of the License, or +# (at your option) any later version. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with this program. If not, see . +# + +import samba.getopt as options +from samba.netcmd import Command, CommandError, Option, SuperCommand +from samba.netcmd.domain.models import AuthenticationSilo, User +from samba.netcmd.domain.models.exceptions import ModelError + + +class cmd_domain_auth_silo_member_grant(Command): + """Grant a member access to an authentication silo.""" + + synopsis = "%prog -H [options]" + + takes_optiongroups = { + "sambaopts": options.SambaOptions, + "credopts": options.CredentialsOptions, + "hostopts": options.HostOptions, + } + + takes_options = [ + Option("--name", + help="Name of authentication silo (required).", + dest="name", action="store", type=str, required=True), + Option("--member", + help="Member to grant access to the silo (DN or account name).", + dest="member", action="store", type=str, required=True), + ] + + def run(self, hostopts=None, sambaopts=None, credopts=None, + name=None, member=None): + + ldb = self.ldb_connect(hostopts, sambaopts, credopts) + + try: + silo = AuthenticationSilo.get(ldb, cn=name) + except ModelError as e: + raise CommandError(e) + + # Check if authentication silo exists first. + if silo is None: + raise CommandError(f"Authentication silo {name} not found.") + + try: + user = User.find(ldb, member) + except ModelError as e: + raise CommandError(e) + + # Ensure the user actually exists first. + if user is None: + raise CommandError(f"User {member} not found.") + + # Grant access to member. + try: + silo.grant(ldb, user) + except ModelError as e: + raise CommandError(e) + + # Display silo assigned status. 
+ if user.assigned_silo and user.assigned_silo == silo.dn: + status = "assigned" + else: + status = "unassigned" + + print(f"User {user} granted access to the authentication silo {name} ({status}).", + file=self.outf) + + +class cmd_domain_auth_silo_member_list(Command): + """List all members in the authentication silo.""" + + synopsis = "%prog -H [options]" + + takes_optiongroups = { + "sambaopts": options.SambaOptions, + "credopts": options.CredentialsOptions, + "hostopts": options.HostOptions, + } + + takes_options = [ + Option("--name", + help="Name of authentication silo (required).", + dest="name", action="store", type=str, required=True), + Option("--json", help="Output results in JSON format.", + dest="output_format", action="store_const", const="json"), + ] + + def run(self, hostopts=None, sambaopts=None, credopts=None, + name=None, output_format=None): + + ldb = self.ldb_connect(hostopts, sambaopts, credopts) + + try: + silo = AuthenticationSilo.get(ldb, cn=name) + except ModelError as e: + raise CommandError(e) + + # Check if authentication silo exists first. + if silo is None: + raise CommandError(f"Authentication silo {name} not found.") + + # Fetch all members. + try: + members = [User.get(ldb, dn=dn) for dn in silo.members] + except ModelError as e: + raise CommandError(e) + + # Using json output format gives more detail. + if output_format == "json": + self.print_json([member.as_dict() for member in members]) + else: + for member in members: + print(member.dn, file=self.outf) + + +class cmd_domain_auth_silo_member_revoke(Command): + """Revoke a member from an authentication silo.""" + + synopsis = "%prog -H [options]" + + takes_optiongroups = { + "sambaopts": options.SambaOptions, + "credopts": options.CredentialsOptions, + "hostopts": options.HostOptions, + } + + takes_options = [ + Option("--name", + help="Name of authentication silo (required).", + dest="name", action="store", type=str, required=True), + Option("--member", + help="Member to revoke from the silo (DN or account name).", + dest="member", action="store", type=str, required=True), + ] + + def run(self, hostopts=None, sambaopts=None, credopts=None, + name=None, member=None): + + ldb = self.ldb_connect(hostopts, sambaopts, credopts) + + try: + silo = AuthenticationSilo.get(ldb, cn=name) + except ModelError as e: + raise CommandError(e) + + # Check if authentication silo exists first. + if silo is None: + raise CommandError(f"Authentication silo {name} not found.") + + try: + user = User.find(ldb, member) + except ModelError as e: + raise CommandError(e) + + # Ensure the user actually exists first. + if user is None: + raise CommandError(f"User {member} not found.") + + # Revoke member access. + try: + silo.revoke(ldb, user) + except ModelError as e: + raise CommandError(e) + + # Display silo assigned status. 
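+        # (Editorial sketch, assuming these commands are exposed as
+        # 'samba-tool domain auth silo member':
+        #
+        #   samba-tool domain auth silo member grant -H ldap://dc1 \
+        #       --name "Finance Silo" --member alice
+        #   samba-tool domain auth silo member revoke -H ldap://dc1 \
+        #       --name "Finance Silo" --member alice
+        # )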
+ if user.assigned_silo and user.assigned_silo == silo.dn: + status = "assigned" + else: + status = "unassigned" + + print(f"User {user} revoked from the authentication silo {name} ({status}).", + file=self.outf) + + +class cmd_domain_auth_silo_member(SuperCommand): + """Manage members in an authentication silo.""" + + subcommands = { + "grant": cmd_domain_auth_silo_member_grant(), + "list": cmd_domain_auth_silo_member_list(), + "revoke": cmd_domain_auth_silo_member_revoke(), + } diff --git a/python/samba/netcmd/domain/backup.py b/python/samba/netcmd/domain/backup.py new file mode 100644 index 0000000..fc7ff53 --- /dev/null +++ b/python/samba/netcmd/domain/backup.py @@ -0,0 +1,1256 @@ +# domain_backup +# +# Copyright Andrew Bartlett +# +# This program is free software; you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation; either version 3 of the License, or +# (at your option) any later version. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with this program. If not, see . +# +import datetime +import os +import sys +import logging +import shutil +import tempfile +import samba +import tdb +import samba.getopt as options +from samba.samdb import SamDB, get_default_backend_store +import ldb +from ldb import LdbError +from samba.samba3 import libsmb_samba_internal as libsmb +from samba.samba3 import param as s3param +from samba.ntacls import backup_online, backup_restore, backup_offline +from samba.auth import system_session +from samba.join import DCJoinContext, join_clone, DCCloneAndRenameContext +from samba.dcerpc.security import dom_sid +from samba.netcmd import Option, CommandError +from samba.dcerpc import misc, security, drsblobs +from samba import Ldb +from samba.netcmd.fsmo import cmd_fsmo_seize +from samba.provision import make_smbconf, DEFAULTSITE +from samba.upgradehelpers import update_krbtgt_account_password +from samba.remove_dc import remove_dc +from samba.provision import secretsdb_self_join +from samba.dbchecker import dbcheck +import re +from samba.provision import guess_names, determine_host_ip, determine_host_ip6 +from samba.provision.sambadns import (fill_dns_data_partitions, + get_dnsadmins_sid, + get_domainguid) +from samba.tdb_util import tdb_copy +from samba.mdb_util import mdb_copy +import errno +from subprocess import CalledProcessError +from samba import sites +from samba.dsdb import _dsdb_load_udv_v2 +from samba.ndr import ndr_pack +from samba.credentials import SMB_SIGNING_REQUIRED +from samba import safe_tarfile as tarfile + + +# work out a SID (based on a free RID) to use when the domain gets restored. +# This ensures that the restored DC's SID won't clash with any other RIDs +# already in use in the domain +def get_sid_for_restore(samdb, logger): + # Allocate a new RID without modifying the database. This should be safe, + # because we acquire the RID master role after creating an account using + # this RID during the restore process. Acquiring the RID master role + # creates a new RID pool which we will fetch RIDs from, so we shouldn't get + # duplicates. + try: + rid = samdb.next_free_rid() + except LdbError as err: + logger.info("A SID could not be allocated for restoring the domain. 
" + "Either no RID Set was found on this DC, " + "or the RID Set was not usable.") + logger.info("To initialise this DC's RID pools, obtain a RID Set from " + "this domain's RID master, or run samba-tool dbcheck " + "to fix the existing RID Set.") + raise CommandError("Cannot create backup", err) + + # Construct full SID + sid = dom_sid(samdb.get_domain_sid()) + sid_for_restore = str(sid) + '-' + str(rid) + + # Confirm the SID is not already in use + try: + res = samdb.search(scope=ldb.SCOPE_BASE, + base='' % sid_for_restore, + attrs=[], + controls=['show_deleted:1', + 'show_recycled:1']) + if len(res) != 1: + # This case makes no sense, but neither does a corrupt RID set + raise CommandError("Cannot create backup - " + "this DC's RID pool is corrupt, " + "the next SID (%s) appears to be in use." % + sid_for_restore) + raise CommandError("Cannot create backup - " + "this DC's RID pool is corrupt, " + "the next SID %s points to existing object %s. " + "Please run samba-tool dbcheck on the source DC." % + (sid_for_restore, res[0].dn)) + except ldb.LdbError as e: + (enum, emsg) = e.args + if enum != ldb.ERR_NO_SUCH_OBJECT: + # We want NO_SUCH_OBJECT, anything else is a serious issue + raise + + return str(sid) + '-' + str(rid) + + +def smb_sysvol_conn(server, lp, creds): + """Returns an SMB connection to the sysvol share on the DC""" + # the SMB bindings rely on having a s3 loadparm + s3_lp = s3param.get_context() + s3_lp.load(lp.configfile) + + # Force signing for the connection + saved_signing_state = creds.get_smb_signing() + creds.set_smb_signing(SMB_SIGNING_REQUIRED) + conn = libsmb.Conn(server, "sysvol", lp=s3_lp, creds=creds) + # Reset signing state + creds.set_smb_signing(saved_signing_state) + return conn + + +def get_timestamp(): + return datetime.datetime.now().isoformat().replace(':', '-') + + +def backup_filepath(targetdir, name, time_str): + filename = 'samba-backup-%s-%s.tar.bz2' % (name, time_str) + return os.path.join(targetdir, filename) + + +def create_backup_tar(logger, tmpdir, backup_filepath): + # Adds everything in the tmpdir into a new tar file + logger.info("Creating backup file %s..." % backup_filepath) + tf = tarfile.open(backup_filepath, 'w:bz2') + tf.add(tmpdir, arcname='./') + tf.close() + + +def create_log_file(targetdir, lp, backup_type, server, include_secrets, + extra_info=None): + # create a summary file about the backup, which will get included in the + # tar file. 
This makes it easy for users to see what the backup involved, + # without having to untar the DB and interrogate it + f = open(os.path.join(targetdir, "backup.txt"), 'w') + try: + time_str = datetime.datetime.now().strftime('%Y-%b-%d %H:%M:%S') + f.write("Backup created %s\n" % time_str) + f.write("Using samba-tool version: %s\n" % lp.get('server string')) + f.write("Domain %s backup, using DC '%s'\n" % (backup_type, server)) + f.write("Backup for domain %s (NetBIOS), %s (DNS realm)\n" % + (lp.get('workgroup'), lp.get('realm').lower())) + f.write("Backup contains domain secrets: %s\n" % str(include_secrets)) + if extra_info: + f.write("%s\n" % extra_info) + finally: + f.close() + + +# Add a backup-specific marker to the DB with info that we'll use during +# the restore process +def add_backup_marker(samdb, marker, value): + m = ldb.Message() + m.dn = ldb.Dn(samdb, "@SAMBA_DSDB") + m[marker] = ldb.MessageElement(value, ldb.FLAG_MOD_ADD, marker) + samdb.modify(m) + + +def check_targetdir(logger, targetdir): + if targetdir is None: + raise CommandError('Target directory required') + + if not os.path.exists(targetdir): + logger.info('Creating targetdir %s...' % targetdir) + os.makedirs(targetdir) + elif not os.path.isdir(targetdir): + raise CommandError("%s is not a directory" % targetdir) + + +# For '--no-secrets' backups, this sets the Administrator user's password to a +# randomly-generated value. This is similar to the provision behaviour +def set_admin_password(logger, samdb): + """Sets a randomly generated password for the backup DB's admin user""" + + # match the admin user by RID + domainsid = samdb.get_domain_sid() + match_admin = "(objectsid=%s-%s)" % (domainsid, + security.DOMAIN_RID_ADMINISTRATOR) + search_expr = "(&(objectClass=user)%s)" % (match_admin,) + + # retrieve the admin username (just in case it's been renamed) + res = samdb.search(base=samdb.domain_dn(), scope=ldb.SCOPE_SUBTREE, + expression=search_expr) + username = str(res[0]['samaccountname']) + + adminpass = samba.generate_random_password(12, 32) + logger.info("Setting %s password in backup to: %s" % (username, adminpass)) + logger.info("Run 'samba-tool user setpassword %s' after restoring DB" % + username) + samdb.setpassword(search_expr, adminpass, force_change_at_next_login=False, + username=username) + + +class cmd_domain_backup_online(samba.netcmd.Command): + """Copy a running DC's current DB into a backup tar file. + + Takes a backup copy of the current domain from a running DC. If the domain + were to undergo a catastrophic failure, then the backup file can be used to + recover the domain. The backup created is similar to the DB that a new DC + would receive when it joins the domain. + + Note that: + - it's recommended to run 'samba-tool dbcheck' before taking a backup-file + and fix any errors it reports. + - all the domain's secrets are included in the backup file. 
+    - although the DB contents can be untarred and examined manually, you need
+      to run 'samba-tool domain backup restore' before you can start a Samba DC
+      from the backup file."""
+
+    synopsis = "%prog --server=<DC-to-backup> --targetdir=<output-dir>"
+    takes_optiongroups = {
+        "sambaopts": options.SambaOptions,
+        "credopts": options.CredentialsOptions,
+    }
+
+    takes_options = [
+        Option("--server", help="The DC to backup", type=str),
+        Option("--targetdir", type=str,
+               help="Directory to write the backup file to"),
+        Option("--no-secrets", action="store_true", default=False,
+               help="Exclude secret values from the backup created"),
+        Option("--backend-store", type="choice", metavar="BACKENDSTORE",
+               choices=["tdb", "mdb"],
+               help="Specify the database backend to be used "
+                    "(default is %s)" % get_default_backend_store()),
+    ]
+
+    def run(self, sambaopts=None, credopts=None, server=None, targetdir=None,
+            no_secrets=False, backend_store=None):
+        logger = self.get_logger()
+        logger.setLevel(logging.DEBUG)
+
+        lp = sambaopts.get_loadparm()
+        creds = credopts.get_credentials(lp)
+
+        # Make sure we have all the required args.
+        if server is None:
+            raise CommandError('Server required')
+
+        check_targetdir(logger, targetdir)
+
+        tmpdir = tempfile.mkdtemp(dir=targetdir)
+
+        # Run a clone join on the remote
+        include_secrets = not no_secrets
+        try:
+            ctx = join_clone(logger=logger, creds=creds, lp=lp,
+                             include_secrets=include_secrets, server=server,
+                             dns_backend='SAMBA_INTERNAL', targetdir=tmpdir,
+                             backend_store=backend_store)
+
+            # get the paths used for the clone, then drop the old samdb connection
+            paths = ctx.paths
+            del ctx
+
+            # Get a free RID to use as the new DC's SID (when it gets restored)
+            remote_sam = SamDB(url='ldap://' + server, credentials=creds,
+                               session_info=system_session(), lp=lp)
+            new_sid = get_sid_for_restore(remote_sam, logger)
+            realm = remote_sam.domain_dns_name()
+
+            # Grab the remote DC's sysvol files and bundle them into a tar file
+            logger.info("Backing up sysvol files (via SMB)...")
+            sysvol_tar = os.path.join(tmpdir, 'sysvol.tar.gz')
+            smb_conn = smb_sysvol_conn(server, lp, creds)
+            backup_online(smb_conn, sysvol_tar, remote_sam.get_domain_sid())
+
+            # remove the default sysvol files created by the clone (we want to
+            # make sure we restore the sysvol.tar.gz files instead)
+            shutil.rmtree(paths.sysvol)
+
+            # Edit the downloaded sam.ldb to mark it as a backup
+            samdb = SamDB(url=paths.samdb, session_info=system_session(), lp=lp,
+                          flags=ldb.FLG_DONT_CREATE_DB)
+            time_str = get_timestamp()
+            add_backup_marker(samdb, "backupDate", time_str)
+            add_backup_marker(samdb, "sidForRestore", new_sid)
+            add_backup_marker(samdb, "backupType", "online")
+
+            # ensure the admin user always has a password set (same as provision)
+            if no_secrets:
+                set_admin_password(logger, samdb)
+
+            # Add everything in the tmpdir to the backup tar file
+            backup_file = backup_filepath(targetdir, realm, time_str)
+            create_log_file(tmpdir, lp, "online", server, include_secrets)
+            create_backup_tar(logger, tmpdir, backup_file)
+        finally:
+            shutil.rmtree(tmpdir)
+
+
+class cmd_domain_backup_restore(cmd_fsmo_seize):
+    """Restore the domain's DB from a backup-file.
+
+    This restores a previously backed up copy of the domain's DB on a new DC.
+
+    Note that the restored DB will not contain the original DC that the backup
+    was taken from (or any other DCs in the original domain). Only the new DC
+    (specified by --newservername) will be present in the restored DB.
+
+    Samba can then be started against the restored DB. Any existing DCs for the
+    domain should be shut down before the new DC is started. Other DCs can then
+    be joined to the new DC to recover the network.
+
+    Note that this command should be run as the root user - it will fail
+    otherwise."""
+
+    synopsis = ("%prog --backup-file=<tar-file> --targetdir=<output-dir> "
+                "--newservername=<DC-name>")
+    takes_options = [
+        Option("--backup-file", help="Path to backup file", type=str),
+        Option("--targetdir", help="Path to write to", type=str),
+        Option("--newservername", help="Name for new server", type=str),
+        Option("--host-ip", type="string", metavar="IPADDRESS",
+               help="set IPv4 ipaddress"),
+        Option("--host-ip6", type="string", metavar="IP6ADDRESS",
+               help="set IPv6 ipaddress"),
+        Option("--site", help="Site to add the new server in", type=str),
+    ]
+
+    takes_optiongroups = {
+        "sambaopts": options.SambaOptions,
+        "credopts": options.CredentialsOptions,
+    }
+
+    def register_dns_zone(self, logger, samdb, lp, ntdsguid, host_ip,
+                          host_ip6, site):
+        """
+        Registers the new realm's DNS objects when a renamed domain backup
+        is restored.
+        """
+        names = guess_names(lp)
+        domaindn = names.domaindn
+        forestdn = samdb.get_root_basedn().get_linearized()
+        dnsdomain = names.dnsdomain.lower()
+        dnsforest = dnsdomain
+        hostname = names.netbiosname.lower()
+        domainsid = dom_sid(samdb.get_domain_sid())
+        dnsadmins_sid = get_dnsadmins_sid(samdb, domaindn)
+        domainguid = get_domainguid(samdb, domaindn)
+
+        # work out the IP address to use for the new DC's DNS records
+        host_ip = determine_host_ip(logger, lp, host_ip)
+        host_ip6 = determine_host_ip6(logger, lp, host_ip6)
+
+        if host_ip is None and host_ip6 is None:
+            raise CommandError('Please specify a host-ip for the new server')
+
+        logger.info("DNS realm was renamed to %s" % dnsdomain)
+        logger.info("Populating DNS partitions for new realm...")
+
+        # Add the DNS objects for the new realm (note: the backup clone already
+        # has the root server objects, so don't add them again)
+        fill_dns_data_partitions(samdb, domainsid, site, domaindn,
+                                 forestdn, dnsdomain, dnsforest, hostname,
+                                 host_ip, host_ip6, domainguid, ntdsguid,
+                                 dnsadmins_sid, add_root=False)
+
+    def fix_old_dc_references(self, samdb):
+        """Fixes attributes that reference the old/removed DCs"""
+
+        # we just want to fix up DB problems here that were introduced by us
+        # removing the old DCs. We restrict what we fix up so that the restored
+        # DB matches the backed-up DB as closely as possible. (There may be
+        # other DB issues inherited from the backed-up DC, but it's not our
+        # place to silently try to fix them here).
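+        # (Editorial note: the fixers are enabled by setting the checker's
+        # fix_* attributes to 'ALL' before check_database() runs, which is
+        # assumed to apply those specific fixes without prompting; everything
+        # happens inside one transaction, so a failed check changes nothing.)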
+ samdb.transaction_start() + chk = dbcheck(samdb, quiet=True, fix=True, yes=False, + in_transaction=True) + + # fix up stale references to the old DC + setattr(chk, 'fix_all_old_dn_string_component_mismatch', 'ALL') + attrs = ['lastKnownParent', 'interSiteTopologyGenerator'] + + # fix-up stale one-way links that point to the old DC + setattr(chk, 'remove_plausible_deleted_DN_links', 'ALL') + attrs += ['msDS-NC-Replica-Locations'] + + cross_ncs_ctrl = 'search_options:1:2' + controls = ['show_deleted:1', cross_ncs_ctrl] + chk.check_database(controls=controls, attrs=attrs) + samdb.transaction_commit() + + def create_default_site(self, samdb, logger): + """Creates the default site, if it doesn't already exist""" + + sitename = DEFAULTSITE + search_expr = "(&(cn={0})(objectclass=site))".format(sitename) + res = samdb.search(samdb.get_config_basedn(), scope=ldb.SCOPE_SUBTREE, + expression=search_expr) + + if len(res) == 0: + logger.info("Creating default site '{0}'".format(sitename)) + sites.create_site(samdb, samdb.get_config_basedn(), sitename) + + return sitename + + def remove_backup_markers(self, samdb): + """Remove DB markers added by the backup process""" + + # check what markers we need to remove (this may vary) + markers = ['sidForRestore', 'backupRename', 'backupDate', 'backupType'] + res = samdb.search(base=ldb.Dn(samdb, "@SAMBA_DSDB"), + scope=ldb.SCOPE_BASE, + attrs=markers) + + # remove any markers that exist in the DB + m = ldb.Message() + m.dn = ldb.Dn(samdb, "@SAMBA_DSDB") + + for attr in markers: + if attr in res[0]: + m[attr] = ldb.MessageElement([], ldb.FLAG_MOD_DELETE, attr) + + samdb.modify(m) + + def get_backup_type(self, samdb): + res = samdb.search(base=ldb.Dn(samdb, "@SAMBA_DSDB"), + scope=ldb.SCOPE_BASE, + attrs=['backupRename', 'backupType']) + + # note that the backupType marker won't exist on backups created on + # v4.9. 
However, we can still infer the type, as only rename and + # online backups are supported on v4.9 + if 'backupType' in res[0]: + backup_type = str(res[0]['backupType']) + elif 'backupRename' in res[0]: + backup_type = "rename" + else: + backup_type = "online" + + return backup_type + + def save_uptodate_vectors(self, samdb, partitions): + """Ensures the UTDV used by DRS is correct after an offline backup""" + for nc in partitions: + # load the replUpToDateVector we *should* have + utdv = _dsdb_load_udv_v2(samdb, nc) + + # convert it to NDR format and write it into the DB + utdv_blob = drsblobs.replUpToDateVectorBlob() + utdv_blob.version = 2 + utdv_blob.ctr.cursors = utdv + utdv_blob.ctr.count = len(utdv) + new_value = ndr_pack(utdv_blob) + + m = ldb.Message() + m.dn = ldb.Dn(samdb, nc) + m["replUpToDateVector"] = ldb.MessageElement(new_value, + ldb.FLAG_MOD_REPLACE, + "replUpToDateVector") + samdb.modify(m) + + def run(self, sambaopts=None, credopts=None, backup_file=None, + targetdir=None, newservername=None, host_ip=None, host_ip6=None, + site=None): + if not (backup_file and os.path.exists(backup_file)): + raise CommandError('Backup file not found.') + if targetdir is None: + raise CommandError('Please specify a target directory') + # allow restoredc to install into a directory prepopulated by selftest + if (os.path.exists(targetdir) and os.listdir(targetdir) and + os.environ.get('SAMBA_SELFTEST') != '1'): + raise CommandError('Target directory is not empty') + if not newservername: + raise CommandError('Server name required') + + logger = logging.getLogger() + logger.setLevel(logging.DEBUG) + logger.addHandler(logging.StreamHandler(sys.stdout)) + + # ldapcmp prefers the server's netBIOS name in upper-case + newservername = newservername.upper() + + # extract the backup .tar to a temp directory + targetdir = os.path.abspath(targetdir) + tf = tarfile.open(backup_file) + tf.extractall(targetdir) + tf.close() + + # use the smb.conf that got backed up, by default (save what was + # actually backed up, before we mess with it) + smbconf = os.path.join(targetdir, 'etc', 'smb.conf') + shutil.copyfile(smbconf, smbconf + ".orig") + + # if a smb.conf was specified on the cmd line, then use that instead + cli_smbconf = sambaopts.get_loadparm_path() + if cli_smbconf: + logger.info("Using %s as restored domain's smb.conf" % cli_smbconf) + shutil.copyfile(cli_smbconf, smbconf) + + lp = samba.param.LoadParm() + lp.load(smbconf) + + # open a DB connection to the restored DB + private_dir = os.path.join(targetdir, 'private') + samdb_path = os.path.join(private_dir, 'sam.ldb') + samdb = SamDB(url=samdb_path, session_info=system_session(), lp=lp, + flags=ldb.FLG_DONT_CREATE_DB) + backup_type = self.get_backup_type(samdb) + + if site is None: + # There's no great way to work out the correct site to add the + # restored DC to. By default, add it to Default-First-Site-Name, + # creating the site if it doesn't already exist + site = self.create_default_site(samdb, logger) + logger.info("Adding new DC to site '{0}'".format(site)) + + # read the naming contexts out of the DB + res = samdb.search(base="", scope=ldb.SCOPE_BASE, + attrs=['namingContexts']) + ncs = [str(r) for r in res[0].get('namingContexts')] + + # for offline backups we need to make sure the upToDateness info + # contains the invocation-ID and highest-USN of the DC we backed up. 
+        # Otherwise replication propagation dampening won't correctly filter
+        # objects created by that DC
+        if backup_type == "offline":
+            self.save_uptodate_vectors(samdb, ncs)
+
+        # Create account using the join_add_objects function in the join object
+        # We need namingContexts, account control flags, and the sid saved by
+        # the backup process.
+        creds = credopts.get_credentials(lp)
+        ctx = DCJoinContext(logger, creds=creds, lp=lp, site=site,
+                            forced_local_samdb=samdb,
+                            netbios_name=newservername)
+        ctx.nc_list = ncs
+        ctx.full_nc_list = ncs
+        ctx.userAccountControl = (samba.dsdb.UF_SERVER_TRUST_ACCOUNT |
+                                  samba.dsdb.UF_TRUSTED_FOR_DELEGATION)
+
+        # rewrite the smb.conf to make sure it uses the new targetdir settings.
+        # (This doesn't update all filepaths in a customized config, but it
+        # corrects the same paths that get set by a new provision)
+        logger.info('Updating basic smb.conf settings...')
+        make_smbconf(smbconf, newservername, ctx.domain_name,
+                     ctx.realm, targetdir, lp=lp,
+                     serverrole="active directory domain controller")
+
+        # Get the SID saved by the backup process and create account
+        res = samdb.search(base=ldb.Dn(samdb, "@SAMBA_DSDB"),
+                           scope=ldb.SCOPE_BASE,
+                           attrs=['sidForRestore'])
+        sid = res[0].get('sidForRestore')[0]
+        logger.info('Creating account with SID: ' + str(sid))
+        try:
+            ctx.join_add_objects(specified_sid=dom_sid(str(sid)))
+        except LdbError as e:
+            (enum, emsg) = e.args
+            if enum != ldb.ERR_CONSTRAINT_VIOLATION:
+                raise
+
+            dup_res = []
+            try:
+                dup_res = samdb.search(base=ldb.Dn(samdb, "<SID=%s>" % sid),
+                                       scope=ldb.SCOPE_BASE,
+                                       attrs=['objectGUID'],
+                                       controls=["show_deleted:0",
+                                                 "show_recycled:0"])
+            except LdbError as dup_e:
+                (dup_enum, _) = dup_e.args
+                if dup_enum != ldb.ERR_NO_SUCH_OBJECT:
+                    raise
+
+            if (len(dup_res) != 1):
+                raise
+
+            objectguid = samdb.schema_format_value("objectGUID",
+                                                   dup_res[0]["objectGUID"][0])
+            objectguid = objectguid.decode('utf-8')
+            logger.error("The RID Pool on the source DC for the backup in %s "
+                         "may be corrupt "
+                         "or in conflict with SIDs already allocated "
+                         "in the domain. " % backup_file)
+            logger.error("Running 'samba-tool dbcheck' on the source "
+                         "DC (and obtaining a new backup) may correct the issue.")
+            logger.error("Alternatively please obtain a new backup "
+                         "against a different DC.")
+            logger.error("The SID we wish to use (%s) is recorded in "
+                         "@SAMBA_DSDB as the sidForRestore attribute."
+                         % sid)
+
+            raise CommandError("Domain restore failed because there "
+                               "is already an existing object (%s) "
+                               "with SID %s and objectGUID %s. "
+                               "This conflicts with "
+                               "the new DC account we want to add "
+                               "for the restored domain. " % (
" % ( + dup_res[0].dn, sid, objectguid)) + + m = ldb.Message() + m.dn = ldb.Dn(samdb, '@ROOTDSE') + ntds_guid = str(ctx.ntds_guid) + m["dsServiceName"] = ldb.MessageElement("" % ntds_guid, + ldb.FLAG_MOD_REPLACE, + "dsServiceName") + samdb.modify(m) + + # if we renamed the backed-up domain, then we need to add the DNS + # objects for the new realm (we do this in the restore, now that we + # know the new DC's IP address) + if backup_type == "rename": + self.register_dns_zone(logger, samdb, lp, ctx.ntds_guid, + host_ip, host_ip6, site) + + secrets_path = os.path.join(private_dir, 'secrets.ldb') + secrets_ldb = Ldb(secrets_path, session_info=system_session(), lp=lp, + flags=ldb.FLG_DONT_CREATE_DB) + secretsdb_self_join(secrets_ldb, domain=ctx.domain_name, + realm=ctx.realm, dnsdomain=ctx.dnsdomain, + netbiosname=ctx.myname, domainsid=ctx.domsid, + machinepass=ctx.acct_pass, + key_version_number=ctx.key_version_number, + secure_channel_type=misc.SEC_CHAN_BDC) + + # Seize DNS roles + domain_dn = samdb.domain_dn() + forest_dn = samba.dn_from_dns_name(samdb.forest_dns_name()) + dns_roles = [("domaindns", domain_dn), + ("forestdns", forest_dn)] + for role, dn in dns_roles: + if dn in ncs: + self.seize_dns_role(role, samdb, None, None, None, force=True) + + # Seize other roles + for role in ['rid', 'pdc', 'naming', 'infrastructure', 'schema']: + self.seize_role(role, samdb, force=True) + + # Get all DCs and remove them (this ensures these DCs cannot + # replicate because they will not have a password) + search_expr = "(&(objectClass=Server)(serverReference=*))" + res = samdb.search(samdb.get_config_basedn(), scope=ldb.SCOPE_SUBTREE, + expression=search_expr) + for m in res: + cn = str(m.get('cn')[0]) + if cn != newservername: + remove_dc(samdb, logger, cn) + + # Remove the repsFrom and repsTo from each NC to ensure we do + # not try (and fail) to talk to the old DCs + for nc in ncs: + msg = ldb.Message() + msg.dn = ldb.Dn(samdb, nc) + + msg["repsFrom"] = ldb.MessageElement([], + ldb.FLAG_MOD_REPLACE, + "repsFrom") + msg["repsTo"] = ldb.MessageElement([], + ldb.FLAG_MOD_REPLACE, + "repsTo") + samdb.modify(msg) + + # Update the krbtgt passwords twice, ensuring no tickets from + # the old domain are valid + update_krbtgt_account_password(samdb) + update_krbtgt_account_password(samdb) + + # restore the sysvol directory from the backup tar file, including the + # original NTACLs. Note that the backup_restore() will fail if not root + sysvol_tar = os.path.join(targetdir, 'sysvol.tar.gz') + dest_sysvol_dir = lp.get('path', 'sysvol') + if not os.path.exists(dest_sysvol_dir): + os.makedirs(dest_sysvol_dir) + backup_restore(sysvol_tar, dest_sysvol_dir, samdb, smbconf) + os.remove(sysvol_tar) + + # fix up any stale links to the old DCs we just removed + logger.info("Fixing up any remaining references to the old DCs...") + self.fix_old_dc_references(samdb) + + # Remove DB markers added by the backup process + self.remove_backup_markers(samdb) + + logger.info("Backup file successfully restored to %s" % targetdir) + logger.info("Please check the smb.conf settings are correct before " + "starting samba.") + + +class cmd_domain_backup_rename(samba.netcmd.Command): + """Copy a running DC's DB to backup file, renaming the domain in the process. + + Where is the new domain's NetBIOS name, and is + the new domain's realm in DNS form. + + This is similar to 'samba-tool backup online' in that it clones the DB of a + running DC. However, this option also renames all the domain entries in the + DB. 
+    DC without it interfering with the existing Samba domain. In other words,
+    you could use this option to clone your production samba domain and restore
+    it to a separate pre-production environment that won't overlap or interfere
+    with the existing production Samba domain.
+
+    Note that:
+    - it's recommended to run 'samba-tool dbcheck' before taking a backup-file
+      and fix any errors it reports.
+    - all the domain's secrets are included in the backup file.
+    - although the DB contents can be untarred and examined manually, you need
+      to run 'samba-tool domain backup restore' before you can start a Samba DC
+      from the backup file.
+    - GPO and sysvol information will still refer to the old realm and will
+      need to be updated manually.
+    - if you specify 'keep-dns-realm', then the DNS records will need updating
+      in order to work (they will still refer to the old DC's IP instead of the
+      new DC's address).
+    - we recommend that you only use this option if you know what you're doing.
+    """
+
+    synopsis = ("%prog <new-domain> <new-dns-realm> --server=<DC-to-backup> "
+                "--targetdir=<output-dir>")
+    takes_optiongroups = {
+        "sambaopts": options.SambaOptions,
+        "credopts": options.CredentialsOptions,
+    }
+
+    takes_options = [
+        Option("--server", help="The DC to backup", type=str),
+        Option("--targetdir", help="Directory to write the backup file",
+               type=str),
+        Option("--keep-dns-realm", action="store_true", default=False,
+               help="Retain the DNS entries for the old realm in the backup"),
+        Option("--no-secrets", action="store_true", default=False,
+               help="Exclude secret values from the backup created"),
+        Option("--backend-store", type="choice", metavar="BACKENDSTORE",
+               choices=["tdb", "mdb"],
+               help="Specify the database backend to be used "
+                    "(default is %s)" % get_default_backend_store()),
+    ]
+
+    takes_args = ["new_domain_name", "new_dns_realm"]
+
+    def update_dns_root(self, logger, samdb, old_realm, delete_old_dns):
+        """Updates dnsRoot for the partition objects to reflect the rename"""
+
+        # lookup the crossRef objects that hold the old realm's dnsRoot
+        partitions_dn = samdb.get_partitions_dn()
+        res = samdb.search(base=partitions_dn, scope=ldb.SCOPE_ONELEVEL,
+                           attrs=["dnsRoot"],
+                           expression='(&(objectClass=crossRef)(dnsRoot=*))')
+        new_realm = samdb.domain_dns_name()
+
+        # go through and add the new realm
+        for res_msg in res:
+            # dnsRoot can be multi-valued, so only look for the old realm
+            for dns_root in res_msg["dnsRoot"]:
+                dns_root = str(dns_root)
+                dn = res_msg.dn
+                if old_realm in dns_root:
+                    new_dns_root = re.sub('%s$' % old_realm, new_realm,
+                                          dns_root)
+                    logger.info("Adding %s dnsRoot to %s" % (new_dns_root, dn))
+
+                    m = ldb.Message()
+                    m.dn = dn
+                    m["dnsRoot"] = ldb.MessageElement(new_dns_root,
+                                                      ldb.FLAG_MOD_ADD,
+                                                      "dnsRoot")
+                    samdb.modify(m)
+
+                    # optionally remove the dnsRoot for the old realm
+                    if delete_old_dns:
+                        logger.info("Removing %s dnsRoot from %s" % (dns_root,
+                                                                     dn))
+                        m["dnsRoot"] = ldb.MessageElement(dns_root,
+                                                          ldb.FLAG_MOD_DELETE,
+                                                          "dnsRoot")
+                        samdb.modify(m)
+
+    # Updates the CN=<domain netbios name>,CN=Partitions,CN=Configuration,...
object to + # reflect the domain rename + def rename_domain_partition(self, logger, samdb, new_netbios_name): + """Renames the domain partition object and updates its nETBIOSName""" + + # lookup the crossRef object that holds the nETBIOSName (nCName has + # already been updated by this point, but the netBIOS hasn't) + base_dn = samdb.get_default_basedn() + nc_name = ldb.binary_encode(str(base_dn)) + partitions_dn = samdb.get_partitions_dn() + res = samdb.search(base=partitions_dn, scope=ldb.SCOPE_ONELEVEL, + attrs=["nETBIOSName"], + expression='ncName=%s' % nc_name) + + logger.info("Changing backup domain's NetBIOS name to %s" % + new_netbios_name) + m = ldb.Message() + m.dn = res[0].dn + m["nETBIOSName"] = ldb.MessageElement(new_netbios_name, + ldb.FLAG_MOD_REPLACE, + "nETBIOSName") + samdb.modify(m) + + # renames the object itself to reflect the change in domain + new_dn = "CN=%s,%s" % (new_netbios_name, partitions_dn) + logger.info("Renaming %s --> %s" % (res[0].dn, new_dn)) + samdb.rename(res[0].dn, new_dn, controls=['relax:0']) + + def delete_old_dns_zones(self, logger, samdb, old_realm): + # remove the top-level DNS entries for the old realm + basedn = samdb.get_default_basedn() + dn = "DC=%s,CN=MicrosoftDNS,DC=DomainDnsZones,%s" % (old_realm, basedn) + logger.info("Deleting old DNS zone %s" % dn) + samdb.delete(dn, ["tree_delete:1"]) + + forestdn = samdb.get_root_basedn().get_linearized() + dn = "DC=_msdcs.%s,CN=MicrosoftDNS,DC=ForestDnsZones,%s" % (old_realm, + forestdn) + logger.info("Deleting old DNS zone %s" % dn) + samdb.delete(dn, ["tree_delete:1"]) + + def fix_old_dn_attributes(self, samdb): + """Fixes attributes (i.e. objectCategory) that still use the old DN""" + + samdb.transaction_start() + # Just fix any mismatches in DN detected (leave any other errors) + chk = dbcheck(samdb, quiet=True, fix=True, yes=False, + in_transaction=True) + # fix up incorrect objectCategory/etc attributes + setattr(chk, 'fix_all_old_dn_string_component_mismatch', 'ALL') + cross_ncs_ctrl = 'search_options:1:2' + controls = ['show_deleted:1', cross_ncs_ctrl] + chk.check_database(controls=controls) + samdb.transaction_commit() + + def run(self, new_domain_name, new_dns_realm, sambaopts=None, + credopts=None, server=None, targetdir=None, keep_dns_realm=False, + no_secrets=False, backend_store=None): + logger = self.get_logger() + logger.setLevel(logging.INFO) + + lp = sambaopts.get_loadparm() + creds = credopts.get_credentials(lp) + + # Make sure we have all the required args. 
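+ # A typical invocation might look something like this (hypothetical + # host and domain names, shown for illustration only): + #   samba-tool domain backup rename NEWDOM newdom.example.com \ + #       --server=dc1.olddom.example.com --targetdir=/srv/backups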
+ if server is None: + raise CommandError('Server required') + + check_targetdir(logger, targetdir) + + delete_old_dns = not keep_dns_realm + + new_dns_realm = new_dns_realm.lower() + new_domain_name = new_domain_name.upper() + + new_base_dn = samba.dn_from_dns_name(new_dns_realm) + logger.info("New realm for backed up domain: %s" % new_dns_realm) + logger.info("New base DN for backed up domain: %s" % new_base_dn) + logger.info("New domain NetBIOS name: %s" % new_domain_name) + + tmpdir = tempfile.mkdtemp(dir=targetdir) + + # setup a join-context for cloning the remote server + include_secrets = not no_secrets + ctx = DCCloneAndRenameContext(new_base_dn, new_domain_name, + new_dns_realm, logger=logger, + creds=creds, lp=lp, + include_secrets=include_secrets, + dns_backend='SAMBA_INTERNAL', + server=server, targetdir=tmpdir, + backend_store=backend_store) + + # sanity-check we're not "renaming" the domain to the same values + old_domain = ctx.domain_name + if old_domain == new_domain_name: + shutil.rmtree(tmpdir) + raise CommandError("Cannot use the current domain NetBIOS name.") + + old_realm = ctx.realm + if old_realm == new_dns_realm: + shutil.rmtree(tmpdir) + raise CommandError("Cannot use the current domain DNS realm.") + + # do the clone/rename + ctx.do_join() + + # get the paths used for the clone, then drop the old samdb connection + del ctx.local_samdb + paths = ctx.paths + + # get a free RID to use as the new DC's SID (when it gets restored) + remote_sam = SamDB(url='ldap://' + server, credentials=creds, + session_info=system_session(), lp=lp) + new_sid = get_sid_for_restore(remote_sam, logger) + + # Grab the remote DC's sysvol files and bundle them into a tar file. + # Note we end up with 2 sysvol dirs - the original domain's files (that + # use the old realm) backed here, as well as default files generated + # for the new realm as part of the clone/join. 
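+ # (The restore command above later unpacks this sysvol.tar.gz via + # backup_restore(), re-applying the NTACLs captured here.)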
+ sysvol_tar = os.path.join(tmpdir, 'sysvol.tar.gz') + smb_conn = smb_sysvol_conn(server, lp, creds) + backup_online(smb_conn, sysvol_tar, remote_sam.get_domain_sid()) + + # connect to the local DB (making sure we use the new/renamed config) + lp.load(paths.smbconf) + samdb = SamDB(url=paths.samdb, session_info=system_session(), lp=lp, + flags=ldb.FLG_DONT_CREATE_DB) + + # Edit the cloned sam.ldb to mark it as a backup + time_str = get_timestamp() + add_backup_marker(samdb, "backupDate", time_str) + add_backup_marker(samdb, "sidForRestore", new_sid) + add_backup_marker(samdb, "backupRename", old_realm) + add_backup_marker(samdb, "backupType", "rename") + + # fix up the DNS objects that are using the old dnsRoot value + self.update_dns_root(logger, samdb, old_realm, delete_old_dns) + + # update the netBIOS name and the Partition object for the domain + self.rename_domain_partition(logger, samdb, new_domain_name) + + if delete_old_dns: + self.delete_old_dns_zones(logger, samdb, old_realm) + + logger.info("Fixing DN attributes after rename...") + self.fix_old_dn_attributes(samdb) + + # ensure the admin user always has a password set (same as provision) + if no_secrets: + set_admin_password(logger, samdb) + + # Add everything in the tmpdir to the backup tar file + backup_file = backup_filepath(targetdir, new_dns_realm, time_str) + create_log_file(tmpdir, lp, "rename", server, include_secrets, + "Original domain %s (NetBIOS), %s (DNS realm)" % + (old_domain, old_realm)) + create_backup_tar(logger, tmpdir, backup_file) + + shutil.rmtree(tmpdir) + + +class cmd_domain_backup_offline(samba.netcmd.Command): + """Backup the local domain directories safely into a tar file. + + Takes a backup copy of the current domain from the local files on disk, + with proper locking of the DB to ensure consistency. Should the domain + ever undergo a catastrophic failure, the backup file can be used to + recover the domain. + + An offline backup differs from an online backup in the following ways: + - a backup can be created even if the DC isn't currently running. + - it includes non-replicated attributes that an online backup wouldn't store. + - it takes a copy of the raw database files, which has the risk that any + hidden problems in the DB are preserved in the backup.""" + + synopsis = "%prog [options]" + takes_optiongroups = { + "sambaopts": options.SambaOptions, + } + + takes_options = [ + Option("--targetdir", + help="Output directory (required)", + type=str), + ] + + backup_ext = '.bak-offline' + + def offline_tdb_copy(self, path): + backup_path = path + self.backup_ext + try: + tdb_copy(path, backup_path, readonly=True) + except CalledProcessError as copy_err: + # If the copy didn't work, check if it was caused by an EINVAL + # error on opening the DB. If so, it's a mutex-locked database, + # which we can safely ignore. + try: + tdb.open(path) + except Exception as e: + if hasattr(e, 'errno') and e.errno == errno.EINVAL: + return + raise e + raise copy_err + + except FileNotFoundError as e: + # tdbbackup tool was not found.
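+ # (tdb_copy() shells out to the external tdbbackup binary, so a + # missing tool surfaces here as FileNotFoundError rather than + # CalledProcessError.)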
+ raise CommandError(e.strerror, e) + + if not os.path.exists(backup_path): + s = "tdbbackup said backup succeeded but {0} not found" + raise CommandError(s.format(backup_path)) + + + def offline_mdb_copy(self, path): + mdb_copy(path, path + self.backup_ext) + + # Secrets databases are a special case: a transaction must be started + # on the secrets.ldb file before backing up that file and secrets.tdb + def backup_secrets(self, private_dir, lp, logger): + secrets_path = os.path.join(private_dir, 'secrets') + secrets_obj = Ldb(secrets_path + '.ldb', lp=lp, + flags=ldb.FLG_DONT_CREATE_DB) + logger.info('Starting transaction on ' + secrets_path) + secrets_obj.transaction_start() + self.offline_tdb_copy(secrets_path + '.ldb') + self.offline_tdb_copy(secrets_path + '.tdb') + secrets_obj.transaction_cancel() + + # sam.ldb must have a transaction started on it before backing up + # everything in sam.ldb.d with the appropriate backup function. + # + # Obtains the sidForRestore (SID for the new DC) and returns it + # from under the transaction + def backup_smb_dbs(self, private_dir, samdb, lp, logger): + sam_ldb_path = os.path.join(private_dir, 'sam.ldb') + + # First, determine if DB backend is MDB. Assume not unless there is a + # 'backendStore' attribute on @PARTITION containing the text 'mdb' + store_label = "backendStore" + res = samdb.search(base="@PARTITION", scope=ldb.SCOPE_BASE, + attrs=[store_label]) + mdb_backend = store_label in res[0] and str(res[0][store_label][0]) == 'mdb' + + # This is needed to keep this variable in scope until the end + # of the transaction. + res_iterator = None + + copy_function = None + if mdb_backend: + logger.info('MDB backend detected. Using mdb backup function.') + copy_function = self.offline_mdb_copy + + # We can't backup with a write transaction open, so get a + # read lock with a search_iterator(). + # + # We have tests in lib/ldb/tests/python/api.py that the + # search iterator takes a read lock effective against a + # transaction. This in turn will ensure there are no + # transactions on either the main or sub-database, even if + # the read locks were not enforced globally (they are). + res_iterator = samdb.search_iterator() + else: + logger.info('Starting transaction on ' + sam_ldb_path) + copy_function = self.offline_tdb_copy + samdb.transaction_start() + + logger.info(' backing up ' + sam_ldb_path) + self.offline_tdb_copy(sam_ldb_path) + sam_ldb_d = sam_ldb_path + '.d' + for sam_file in os.listdir(sam_ldb_d): + sam_file = os.path.join(sam_ldb_d, sam_file) + if sam_file.endswith('.ldb'): + logger.info(' backing up locked/related file ' + sam_file) + copy_function(sam_file) + elif sam_file.endswith('.tdb'): + logger.info(' tdbbackup of locked/related file ' + sam_file) + self.offline_tdb_copy(sam_file) + else: + logger.info(' copying locked/related file ' + sam_file) + shutil.copyfile(sam_file, sam_file + self.backup_ext) + + sid = get_sid_for_restore(samdb, logger) + + if mdb_backend: + # Delete the iterator, release the read lock + del(res_iterator) + else: + samdb.transaction_cancel() + + return sid + + # Find where a path should go in the fixed backup archive structure. 
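+ # For example (illustrative paths only): with a private dir of + # /usr/local/samba/private, the file + # /usr/local/samba/private/sam.ldb.bak-offline is archived as + # private/sam.ldb.bak-offline inside the tar file.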
+ def get_arc_path(self, path, conf_paths): + backup_dirs = {"private": conf_paths.private_dir, + "state": conf_paths.state_dir, + "etc": os.path.dirname(conf_paths.smbconf)} + matching_dirs = [(_, p) for (_, p) in backup_dirs.items() if + path.startswith(p)] + arc_path, fs_path = matching_dirs[0] + + # If more than one directory is a parent of this path, then at least + # one configured path is a subdir of another. Use closest match. + if len(matching_dirs) > 1: + arc_path, fs_path = max(matching_dirs, key=lambda p: len(p[1])) + arc_path += path[len(fs_path):] + + return arc_path + + def run(self, sambaopts=None, targetdir=None): + + logger = logging.getLogger() + logger.setLevel(logging.DEBUG) + logger.addHandler(logging.StreamHandler(sys.stdout)) + + # Get the absolute paths of all the directories we're going to backup + lp = sambaopts.get_loadparm() + + paths = samba.provision.provision_paths_from_lp(lp, lp.get('realm')) + if not (paths.samdb and os.path.exists(paths.samdb)): + logger.error("No database found at {0}".format(paths.samdb)) + raise CommandError('Please check you are root, and ' + + 'are running this command on an AD DC') + + check_targetdir(logger, targetdir) + + # Iterating over the directories in this specific order ensures that + # when the private directory contains hardlinks that are also contained + # in other directories to be backed up (such as in paths.binddns_dir), + # the hardlinks in the private directory take precedence. + backup_dirs = [paths.private_dir, paths.state_dir, + os.path.dirname(paths.smbconf)] # etc dir + logger.info('running backup on dirs: {0}'.format(' '.join(backup_dirs))) + + # Recursively get all file paths in the backup directories + all_files = [] + all_stats = set() + for backup_dir in backup_dirs: + for (working_dir, _, filenames) in os.walk(backup_dir): + if working_dir.startswith(paths.sysvol): + continue + if working_dir.endswith('.sock') or '.sock/' in working_dir: + continue + # The BIND DNS database can be regenerated, so it doesn't need + # to be backed up. + if working_dir.startswith(os.path.join(paths.binddns_dir, 'dns')): + continue + + for filename in filenames: + full_path = os.path.join(working_dir, filename) + + # Ignore files that have already been added. This prevents + # duplicates if one backup dir is a subdirectory of another, + # or if backup dirs contain hardlinks. + try: + s = os.stat(full_path, follow_symlinks=False) + except FileNotFoundError: + logger.warning(f"{full_path} does not exist!") + continue + + if (s.st_ino, s.st_dev) in all_stats: + continue + + # Assume existing backup files are from a previous backup. + # Delete and ignore. + if filename.endswith(self.backup_ext): + os.remove(full_path) + continue + + # Sock files are autogenerated at runtime, ignore. + if filename.endswith('.sock'): + continue + + all_files.append(full_path) + all_stats.add((s.st_ino, s.st_dev)) + + # We would prefer to open with FLG_RDONLY but then we can't + # start a transaction which is the strong isolation we want + # for the backup. 
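+ # (A read-only handle cannot take the transaction locks, so we open + # read-write and rely on the transactions taken in backup_secrets() + # and backup_smb_dbs() to hold off writers while the files are copied.)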
+ samdb = SamDB(url=paths.samdb, session_info=system_session(), lp=lp, + flags=ldb.FLG_DONT_CREATE_DB) + + # Backup secrets, sam.ldb and their downstream files + self.backup_secrets(paths.private_dir, lp, logger) + sid = self.backup_smb_dbs(paths.private_dir, samdb, lp, logger) + + # Get the domain SID so we can later place it in the backup + dom_sid_str = samdb.get_domain_sid() + dom_sid = security.dom_sid(dom_sid_str) + + # Close the original samdb, to avoid any confusion, we will + # not use this any more as the data has all been copied under + # the transaction + samdb = None + + # Open the new backed up samdb, flag it as backed up, and write + # the next SID so the restore tool can add objects. We use + # options=["modules:"] here to prevent any modules from loading. + # WARNING: Don't change this code unless you know what you're doing. + # Writing to a .bak file only works because the DN being + # written to happens to be top level. + samdb = Ldb(url=paths.samdb + self.backup_ext, + session_info=system_session(), lp=lp, + options=["modules:"], flags=ldb.FLG_DONT_CREATE_DB) + time_str = get_timestamp() + add_backup_marker(samdb, "backupDate", time_str) + add_backup_marker(samdb, "sidForRestore", sid) + add_backup_marker(samdb, "backupType", "offline") + + # Close the backed up samdb + samdb = None + + # Now handle all the LDB and TDB files that are not linked to + # anything else. Use transactions for LDBs. + for path in all_files: + if not os.path.exists(path + self.backup_ext): + if path.endswith('.ldb'): + logger.info('Starting transaction on solo db: ' + path) + ldb_obj = Ldb(path, lp=lp, flags=ldb.FLG_DONT_CREATE_DB) + ldb_obj.transaction_start() + logger.info(' running tdbbackup on the same file') + self.offline_tdb_copy(path) + ldb_obj.transaction_cancel() + elif path.endswith('.tdb'): + logger.info('running tdbbackup on lone tdb file ' + path) + self.offline_tdb_copy(path) + + # Now make the backup tar file and add all + # backed up files and any other files to it. 
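+ # (The tar is assembled under an INCOMPLETE-prefixed temporary + # directory and only os.rename()'d to its final name once fully + # written, so an interrupted backup never leaves behind a + # complete-looking but truncated tar file.)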
+ temp_tar_dir = tempfile.mkdtemp(dir=targetdir, + prefix='INCOMPLETEsambabackupfile') + temp_tar_name = os.path.join(temp_tar_dir, "samba-backup.tar.bz2") + tar = tarfile.open(temp_tar_name, 'w:bz2') + + logger.info('running offline ntacl backup of sysvol') + sysvol_tar_fn = 'sysvol.tar.gz' + sysvol_tar = os.path.join(temp_tar_dir, sysvol_tar_fn) + backup_offline(paths.sysvol, sysvol_tar, paths.smbconf, dom_sid) + tar.add(sysvol_tar, sysvol_tar_fn) + os.remove(sysvol_tar) + + create_log_file(temp_tar_dir, lp, "offline", "localhost", True) + backup_fn = os.path.join(temp_tar_dir, "backup.txt") + tar.add(backup_fn, os.path.basename(backup_fn)) + os.remove(backup_fn) + + logger.info('building backup tar') + for path in all_files: + arc_path = self.get_arc_path(path, paths) + + if os.path.exists(path + self.backup_ext): + logger.info(' adding backup ' + arc_path + self.backup_ext + + ' to tar and deleting file') + tar.add(path + self.backup_ext, arcname=arc_path) + os.remove(path + self.backup_ext) + elif path.endswith('.ldb') or path.endswith('.tdb'): + logger.info(' skipping ' + arc_path) + else: + logger.info(' adding misc file ' + arc_path) + tar.add(path, arcname=arc_path) + + tar.close() + os.rename(temp_tar_name, + os.path.join(targetdir, + 'samba-backup-{0}.tar.bz2'.format(time_str))) + os.rmdir(temp_tar_dir) + logger.info('Backup succeeded.') + + +class cmd_domain_backup(samba.netcmd.SuperCommand): + """Create or restore a backup of the domain.""" + subcommands = {'offline': cmd_domain_backup_offline(), + 'online': cmd_domain_backup_online(), + 'rename': cmd_domain_backup_rename(), + 'restore': cmd_domain_backup_restore()} diff --git a/python/samba/netcmd/domain/claim/__init__.py b/python/samba/netcmd/domain/claim/__init__.py new file mode 100644 index 0000000..de7c4bb --- /dev/null +++ b/python/samba/netcmd/domain/claim/__init__.py @@ -0,0 +1,35 @@ +# Unix SMB/CIFS implementation. +# +# claim management +# +# Copyright (C) Catalyst.Net Ltd. 2023 +# +# Written by Rob van der Linde +# +# This program is free software; you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation; either version 3 of the License, or +# (at your option) any later version. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with this program. If not, see . +# + +from samba.netcmd import SuperCommand + +from .claim_type import cmd_domain_claim_claim_type +from .value_type import cmd_domain_claim_value_type + + +class cmd_domain_claim(SuperCommand): + """Manage claims on the domain.""" + + subcommands = { + "claim-type": cmd_domain_claim_claim_type(), + "value-type": cmd_domain_claim_value_type(), + } diff --git a/python/samba/netcmd/domain/claim/claim_type.py b/python/samba/netcmd/domain/claim/claim_type.py new file mode 100644 index 0000000..c0825c6 --- /dev/null +++ b/python/samba/netcmd/domain/claim/claim_type.py @@ -0,0 +1,361 @@ +# Unix SMB/CIFS implementation. +# +# claim type management +# +# Copyright (C) Catalyst.Net Ltd. 
2023 +# +# Written by Rob van der Linde +# +# This program is free software; you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation; either version 3 of the License, or +# (at your option) any later version. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with this program. If not, see . +# + +import binascii +import os + +import samba.getopt as options +from samba.netcmd import Command, CommandError, Option, SuperCommand +from samba.netcmd.domain.models import AttributeSchema, ClassSchema,\ + ClaimType, ValueType +from samba.netcmd.domain.models.exceptions import ModelError + + +class cmd_domain_claim_claim_type_create(Command): + """Create claim types on the domain.""" + + synopsis = "%prog -H [options]" + + takes_optiongroups = { + "sambaopts": options.SambaOptions, + "credopts": options.CredentialsOptions, + "hostopts": options.HostOptions, + } + + takes_options = [ + Option("--attribute", help="Attribute of claim type to create (required).", + dest="attribute_name", action="store", type=str, required=True), + Option("--class", help="Object classes to set claim type to.", + dest="class_names", action="append", type=str, required=True), + Option("--name", help="Optional display name or use attribute name.", + dest="name", action="store", type=str), + Option("--description", + help="Optional description or use from attribute.", + dest="description", action="store", type=str), + Option("--disable", help="Disable claim type.", + dest="disable", action="store_true"), + Option("--enable", help="Enable claim type.", + dest="enable", action="store_true"), + Option("--protect", + help="Protect claim type from accidental deletion.", + dest="protect", action="store_true"), + Option("--unprotect", + help="Unprotect claim type from accidental deletion.", + dest="unprotect", action="store_true") + ] + + def run(self, hostopts=None, sambaopts=None, credopts=None, name=None, + attribute_name=None, class_names=None, description=None, + disable=None, enable=None, protect=None, unprotect=None): + + # mutually exclusive attributes + if enable and disable: + raise CommandError("--enable and --disable cannot be used together.") + if protect and unprotect: + raise CommandError("--protect and --unprotect cannot be used together.") + + ldb = self.ldb_connect(hostopts, sambaopts, credopts) + + display_name = name or attribute_name + try: + claim_type = ClaimType.get(ldb, display_name=display_name) + except ModelError as e: + raise CommandError(e) + + # Check if a claim type with this display name already exists. + # Note: you can register the same claim type under another display name. + if claim_type: + raise CommandError(f"Claim type {display_name} already exists, " + "but you can use --name to use another name.") + + # Lookup attribute and class names in schema. + try: + applies_to = [ClassSchema.lookup(ldb, name) for name in class_names] + attribute = AttributeSchema.lookup(ldb, attribute_name) + value_type = ValueType.lookup(ldb, attribute) + except (LookupError, ModelError, ValueError) as e: + raise CommandError(e) + + # Generate the new Claim Type cn. + # Windows creates a random number here containing 16 hex digits. 
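+ # (e.g. a generated cn might look like + # "ad://ext/myAttribute:f3a91c07d24be518" - hypothetical value.)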
+ # We can achieve something similar using urandom(8) + instance = binascii.hexlify(os.urandom(8)).decode() + cn = f"ad://ext/{display_name}:{instance}" + + # adminDescription should be present but still have a fallback. + if description is None: + description = attribute.admin_description or display_name + + # claim_is_value_space_restricted is always False because we don't + # yet support creating claims with a restricted possible values list. + claim_type = ClaimType( + cn=cn, + description=description, + display_name=display_name, + enabled=not disable, + claim_attribute_source=attribute.dn, + claim_is_single_valued=attribute.is_single_valued, + claim_is_value_space_restricted=False, + claim_source_type="AD", + claim_type_applies_to_class=[obj.dn for obj in applies_to], + claim_value_type=value_type.claim_value_type, + ) + + # Either --enable will be set or --disable but never both. + # The default if both are missing is enabled=True. + if enable is not None: + claim_type.enabled = enable + else: + claim_type.enabled = not disable + + # Create claim type + try: + claim_type.save(ldb) + + if protect: + claim_type.protect(ldb) + except ModelError as e: + raise CommandError(e) + + # Claim type created successfully. + self.outf.write(f"Created claim type: {display_name}") + if attribute_name != display_name: + self.outf.write(f" ({attribute_name})\n") + else: + self.outf.write("\n") + + +class cmd_domain_claim_claim_type_modify(Command): + """Modify claim types on the domain.""" + + synopsis = "%prog -H [options]" + + takes_optiongroups = { + "sambaopts": options.SambaOptions, + "credopts": options.CredentialsOptions, + "hostopts": options.HostOptions, + } + + takes_options = [ + Option("--name", help="Display name of claim type to modify (required).", + dest="name", action="store", type=str, required=True), + Option("--class", help="Object classes to set claim type to.", + dest="class_names", action="append", type=str), + Option("--description", help="Set the claim type description.", + dest="description", action="store", type=str), + Option("--enable", + help="Enable claim type.", + dest="enable", action="store_true"), + Option("--disable", + help="Disable claim type.", + dest="disable", action="store_true"), + Option("--protect", + help="Protect claim type from accidental deletion.", + dest="protect", action="store_true"), + Option("--unprotect", + help="Unprotect claim type from accidental deletion.", + dest="unprotect", action="store_true") + ] + + def run(self, hostopts=None, sambaopts=None, credopts=None, name=None, + class_names=None, description=None, enable=None, disable=None, + protect=None, unprotect=None): + + if enable and disable: + raise CommandError("--enable and --disable cannot be used together.") + if protect and unprotect: + raise CommandError("--protect and --unprotect cannot be used together.") + + ldb = self.ldb_connect(hostopts, sambaopts, credopts) + + try: + claim_type = ClaimType.get(ldb, display_name=name) + except ModelError as e: + raise CommandError(e) + + # Check if claim type exists. + if not claim_type: + raise CommandError(f"Claim type {name} not found.") + + # Either --enable will be set or --disable but never both. + if enable: + claim_type.enabled = True + elif disable: + claim_type.enabled = False + + # Update the description. + if description is not None: + claim_type.description = description + + # Change class names for claim type. 
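+ # (--class uses action="append", so e.g. passing --class=user + # --class=computer arrives here as class_names=["user", "computer"].)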
+ if class_names is not None: + try: + applies_to = [ClassSchema.lookup(ldb, name) + for name in class_names] + except (LookupError, ValueError) as e: + raise CommandError(e) + + claim_type.claim_type_applies_to_class = [obj.dn for obj in applies_to] + + # Update claim type. + try: + claim_type.save(ldb) + + if protect: + claim_type.protect(ldb) + elif unprotect: + claim_type.unprotect(ldb) + except ModelError as e: + raise CommandError(e) + + # Claim type updated successfully. + self.outf.write(f"Updated claim type: {name}\n") + + +class cmd_domain_claim_claim_type_delete(Command): + """Delete claim types on the domain.""" + + synopsis = "%prog -H [options]" + + takes_optiongroups = { + "sambaopts": options.SambaOptions, + "credopts": options.CredentialsOptions, + "hostopts": options.HostOptions, + } + + takes_options = [ + Option("--name", help="Display name of claim type to delete (required).", + dest="name", action="store", type=str, required=True), + Option("--force", help="Force claim type delete even if it is protected.", + dest="force", action="store_true") + ] + + def run(self, hostopts=None, sambaopts=None, credopts=None, + name=None, force=None): + + ldb = self.ldb_connect(hostopts, sambaopts, credopts) + + try: + claim_type = ClaimType.get(ldb, display_name=name) + except ModelError as e: + raise CommandError(e) + + # Check if claim type exists first. + if claim_type is None: + raise CommandError(f"Claim type {name} not found.") + + # Delete claim type. + try: + if force: + claim_type.unprotect(ldb) + + claim_type.delete(ldb) + except ModelError as e: + if not force: + raise CommandError( + f"{e}\nTry --force to delete protected claim types.") + else: + raise CommandError(e) + + # Claim type deleted successfully. + self.outf.write(f"Deleted claim type: {name}\n") + + +class cmd_domain_claim_claim_type_list(Command): + """List claim types on the domain.""" + + synopsis = "%prog -H [options]" + + takes_optiongroups = { + "sambaopts": options.SambaOptions, + "credopts": options.CredentialsOptions, + "hostopts": options.HostOptions, + } + + takes_options = [ + Option("--json", help="Output results in JSON format.", + dest="output_format", action="store_const", const="json"), + ] + + def run(self, hostopts=None, sambaopts=None, credopts=None, + output_format=None): + + ldb = self.ldb_connect(hostopts, sambaopts, credopts) + + # Claim types grouped by displayName. + try: + claim_types = {claim_type.display_name: claim_type.as_dict() + for claim_type in ClaimType.query(ldb)} + except ModelError as e: + raise CommandError(e) + + # Using json output format gives more detail. + if output_format == "json": + self.print_json(claim_types) + else: + for claim_type in claim_types.keys(): + self.outf.write(f"{claim_type}\n") + + +class cmd_domain_claim_claim_type_view(Command): + """View a single claim type on the domain.""" + + synopsis = "%prog -H [options]" + + takes_optiongroups = { + "sambaopts": options.SambaOptions, + "credopts": options.CredentialsOptions, + "hostopts": options.HostOptions, + } + + takes_options = [ + Option("--name", help="Display name of claim type to view (required).", + dest="name", action="store", type=str, required=True), + ] + + def run(self, hostopts=None, sambaopts=None, credopts=None, name=None): + + ldb = self.ldb_connect(hostopts, sambaopts, credopts) + + try: + claim_type = ClaimType.get(ldb, display_name=name) + except ModelError as e: + raise CommandError(e) + + # Check if claim type exists first. 
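+ # (ClaimType.get() returns None when no match is found, rather than + # raising, hence the explicit check.)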
+ if claim_type is None: + raise CommandError(f"Claim type {name} not found.") + + # Display claim type as JSON. + self.print_json(claim_type.as_dict()) + + +class cmd_domain_claim_claim_type(SuperCommand): + """Manage claim types on the domain.""" + + subcommands = { + "create": cmd_domain_claim_claim_type_create(), + "delete": cmd_domain_claim_claim_type_delete(), + "modify": cmd_domain_claim_claim_type_modify(), + "list": cmd_domain_claim_claim_type_list(), + "view": cmd_domain_claim_claim_type_view(), + } diff --git a/python/samba/netcmd/domain/claim/value_type.py b/python/samba/netcmd/domain/claim/value_type.py new file mode 100644 index 0000000..a261113 --- /dev/null +++ b/python/samba/netcmd/domain/claim/value_type.py @@ -0,0 +1,105 @@ +# Unix SMB/CIFS implementation. +# +# claim value type management +# +# Copyright (C) Catalyst.Net Ltd. 2023 +# +# Written by Rob van der Linde +# +# This program is free software; you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation; either version 3 of the License, or +# (at your option) any later version. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with this program. If not, see . +# + +import samba.getopt as options +from samba.netcmd import Command, CommandError, Option, SuperCommand +from samba.netcmd.domain.models import ValueType +from samba.netcmd.domain.models.exceptions import ModelError + + +class cmd_domain_claim_value_type_list(Command): + """List claim values types on the domain.""" + + synopsis = "%prog -H [options]" + + takes_optiongroups = { + "sambaopts": options.SambaOptions, + "credopts": options.CredentialsOptions, + "hostopts": options.HostOptions, + } + + takes_options = [ + Option("--json", help="Output results in JSON format.", + dest="output_format", action="store_const", const="json"), + ] + + def run(self, hostopts=None, sambaopts=None, credopts=None, + output_format=None): + + ldb = self.ldb_connect(hostopts, sambaopts, credopts) + + # Value types grouped by display name. + try: + value_types = {value_type.display_name: value_type.as_dict() + for value_type in ValueType.query(ldb)} + except ModelError as e: + raise CommandError(e) + + # Using json output format gives more detail. + if output_format == "json": + self.print_json(value_types) + else: + for value_type in value_types.keys(): + self.outf.write(f"{value_type}\n") + + +class cmd_domain_claim_value_type_view(Command): + """View a single claim value type on the domain.""" + + synopsis = "%prog -H [options]" + + takes_optiongroups = { + "sambaopts": options.SambaOptions, + "credopts": options.CredentialsOptions, + "hostopts": options.HostOptions, + } + + takes_options = [ + Option("--name", + help="Display name of claim value type to view (required).", + dest="name", action="store", type=str, required=True), + ] + + def run(self, hostopts=None, sambaopts=None, credopts=None, name=None): + + ldb = self.ldb_connect(hostopts, sambaopts, credopts) + + try: + value_type = ValueType.get(ldb, display_name=name) + except ModelError as e: + raise CommandError(e) + + # Check if value type exists first. 
+ if value_type is None: + raise CommandError(f"Value type {name} not found.") + + # Display value type as JSON. + self.print_json(value_type.as_dict()) + + +class cmd_domain_claim_value_type(SuperCommand): + """Manage claim value types on the domain.""" + + subcommands = { + "list": cmd_domain_claim_value_type_list(), + "view": cmd_domain_claim_value_type_view(), + } diff --git a/python/samba/netcmd/domain/classicupgrade.py b/python/samba/netcmd/domain/classicupgrade.py new file mode 100644 index 0000000..5b6a8a8 --- /dev/null +++ b/python/samba/netcmd/domain/classicupgrade.py @@ -0,0 +1,189 @@ +# domain management - domain classicupgrade +# +# Copyright Matthias Dieter Wallnoefer 2009 +# Copyright Andrew Kroeger 2009 +# Copyright Jelmer Vernooij 2007-2012 +# Copyright Giampaolo Lauria 2011 +# Copyright Matthieu Patou 2011 +# Copyright Andrew Bartlett 2008-2015 +# Copyright Stefan Metzmacher 2012 +# +# This program is free software; you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation; either version 3 of the License, or +# (at your option) any later version. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with this program. If not, see <http://www.gnu.org/licenses/>. +# + +import os +import tempfile +import subprocess + +import samba +import samba.getopt as options +from samba.auth import system_session +from samba.auth_util import system_session_unix +from samba.common import get_string +from samba.netcmd import Command, CommandError, Option +from samba.samba3 import Samba3 +from samba.samba3 import param as s3param +from samba.upgrade import upgrade_from_samba3 + +from .common import common_ntvfs_options + + +def get_testparm_var(testparm, smbconf, varname): + errfile = open(os.devnull, 'w') + p = subprocess.Popen([testparm, '-s', '-l', + '--parameter-name=%s' % varname, smbconf], + stdout=subprocess.PIPE, stderr=errfile) + (out, err) = p.communicate() + errfile.close() + lines = out.split(b'\n') + if lines: + return get_string(lines[0]).strip() + return "" + + +class cmd_domain_classicupgrade(Command): + """Upgrade from Samba classic (NT4-like) database to Samba AD DC database. + + Specify either a directory with all Samba classic DC databases and state files (with --dbdir) or + the testparm utility from your classic installation (with --testparm). + """ + + synopsis = "%prog [options] <classic_smb_conf>" + + takes_optiongroups = { + "sambaopts": options.SambaOptions, + "versionopts": options.VersionOptions + } + + takes_options = [ + Option("--dbdir", type="string", metavar="DIR", + help="Path to samba classic DC database directory"), + Option("--testparm", type="string", metavar="PATH", + help="Path to samba classic DC testparm utility from the previous installation.
This allows the default paths of the previous installation to be followed"), + Option("--targetdir", type="string", metavar="DIR", + help="Path prefix where the new Samba 4.0 AD domain should be initialised"), + Option("-q", "--quiet", help="Be quiet", action="store_true"), + Option("-v", "--verbose", help="Be verbose", action="store_true"), + Option("--dns-backend", type="choice", metavar="NAMESERVER-BACKEND", + choices=["SAMBA_INTERNAL", "BIND9_FLATFILE", "BIND9_DLZ", "NONE"], + help="The DNS server backend. SAMBA_INTERNAL is the builtin name server (default), " + "BIND9_FLATFILE uses bind9 text database to store zone information, " + "BIND9_DLZ uses samba4 AD to store zone information, " + "NONE skips the DNS setup entirely (this DC will not be a DNS server)", + default="SAMBA_INTERNAL") + ] + + ntvfs_options = [ + Option("--use-xattrs", type="choice", choices=["yes", "no", "auto"], + metavar="[yes|no|auto]", + help="Define if we should use the native fs capabilities or a tdb file for " + "storing attributes like ntacl when --use-ntvfs is set. " + "auto tries to make an intelligent guess based on the user rights and system capabilities", + default="auto") + ] + if samba.is_ntvfs_fileserver_built(): + takes_options.extend(common_ntvfs_options) + takes_options.extend(ntvfs_options) + + takes_args = ["smbconf"] + + def run(self, smbconf=None, targetdir=None, dbdir=None, testparm=None, + quiet=False, verbose=False, use_xattrs="auto", sambaopts=None, versionopts=None, + dns_backend=None, use_ntvfs=False): + + if not os.path.exists(smbconf): + raise CommandError("File %s does not exist" % smbconf) + + if testparm and not os.path.exists(testparm): + raise CommandError("Testparm utility %s does not exist" % testparm) + + if dbdir and not os.path.exists(dbdir): + raise CommandError("Directory %s does not exist" % dbdir) + + if not dbdir and not testparm: + raise CommandError("Please specify either dbdir or testparm") + + logger = self.get_logger(verbose=verbose, quiet=quiet) + + if dbdir and testparm: + logger.warning("both dbdir and testparm specified, ignoring dbdir.") + dbdir = None + + lp = sambaopts.get_loadparm() + + s3conf = s3param.get_context() + + if sambaopts.realm: + s3conf.set("realm", sambaopts.realm) + + if targetdir is not None: + if not os.path.isdir(targetdir): + os.mkdir(targetdir) + + eadb = True + if use_xattrs == "yes": + eadb = False + elif use_xattrs == "auto" and not use_ntvfs: + eadb = False + elif not use_ntvfs: + raise CommandError("--use-xattrs=no requires --use-ntvfs (not supported for production use). " + "Please re-run with --use-xattrs omitted.") + elif use_xattrs == "auto" and not s3conf.get("posix:eadb"): + if targetdir: + tmpfile = tempfile.NamedTemporaryFile(dir=os.path.abspath(targetdir)) + else: + tmpfile = tempfile.NamedTemporaryFile(dir=os.path.abspath(os.path.dirname(lp.get("private dir")))) + try: + try: + samba.ntacls.setntacl(lp, tmpfile.name, + "O:S-1-5-32G:S-1-5-32", + "S-1-5-32", + system_session_unix(), + "native") + eadb = False + except Exception: + # FIXME: Don't catch all exceptions here + logger.info("You are not root or your system does not support xattr, using tdb backend for attributes.
" + "If you intend to use this provision in production, rerun the script as root on a system supporting xattrs.") + finally: + tmpfile.close() + + # Set correct default values from dbdir or testparm + paths = {} + if dbdir: + paths["state directory"] = dbdir + paths["private dir"] = dbdir + paths["lock directory"] = dbdir + paths["smb passwd file"] = dbdir + "/smbpasswd" + else: + paths["state directory"] = get_testparm_var(testparm, smbconf, "state directory") + paths["private dir"] = get_testparm_var(testparm, smbconf, "private dir") + paths["smb passwd file"] = get_testparm_var(testparm, smbconf, "smb passwd file") + paths["lock directory"] = get_testparm_var(testparm, smbconf, "lock directory") + # "testparm" from Samba 3 < 3.4.x is not aware of the parameter + # "state directory", instead make use of "lock directory" + if len(paths["state directory"]) == 0: + paths["state directory"] = paths["lock directory"] + + for p in paths: + s3conf.set(p, paths[p]) + + # load smb.conf parameters + logger.info("Reading smb.conf") + s3conf.load(smbconf) + samba3 = Samba3(smbconf, s3conf) + + logger.info("Provisioning") + upgrade_from_samba3(samba3, logger, targetdir, session_info=system_session(), + useeadb=eadb, dns_backend=dns_backend, use_ntvfs=use_ntvfs) diff --git a/python/samba/netcmd/domain/common.py b/python/samba/netcmd/domain/common.py new file mode 100644 index 0000000..144d22b --- /dev/null +++ b/python/samba/netcmd/domain/common.py @@ -0,0 +1,64 @@ +# domain management - common code +# +# Copyright Matthias Dieter Wallnoefer 2009 +# Copyright Andrew Kroeger 2009 +# Copyright Jelmer Vernooij 2007-2012 +# Copyright Giampaolo Lauria 2011 +# Copyright Matthieu Patou 2011 +# Copyright Andrew Bartlett 2008-2015 +# Copyright Stefan Metzmacher 2012 +# +# This program is free software; you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation; either version 3 of the License, or +# (at your option) any later version. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with this program. If not, see . 
+# + +from samba.netcmd import Option +from samba.samdb import get_default_backend_store + +common_ntvfs_options = [ + Option("--use-ntvfs", help="Use NTVFS for the fileserver (default = no)", + action="store_true") +] + +common_provision_join_options = [ + Option("--machinepass", type="string", metavar="PASSWORD", + help="choose machine password (otherwise random)"), + Option("--plaintext-secrets", action="store_true", + help="Store secret/sensitive values as plain text on disk " + + "(default is to encrypt secret/sensitive values)"), + Option("--backend-store", type="choice", metavar="BACKENDSTORE", + choices=["tdb", "mdb"], + help="Specify the database backend to be used " + "(default is %s)" % get_default_backend_store()), + Option("--backend-store-size", type="bytes", metavar="SIZE", + help="Specify the size of the backend database, currently only " + + "supported by lmdb backends (default is 8 GiB)."), + Option("--targetdir", metavar="DIR", + help="Set target directory (where to store provision)", type=str), + Option("-q", "--quiet", help="Be quiet", action="store_true"), +] + +common_join_options = [ + Option("--server", help="DC to join", type=str), + Option("--site", help="site to join", type=str), + Option("--domain-critical-only", + help="only replicate critical domain objects", + action="store_true"), + Option("--dns-backend", type="choice", metavar="NAMESERVER-BACKEND", + choices=["SAMBA_INTERNAL", "BIND9_DLZ", "NONE"], + help="The DNS server backend. SAMBA_INTERNAL is the builtin name server (default), " + "BIND9_DLZ uses samba4 AD to store zone information, " + "NONE skips the DNS setup entirely (this DC will not be a DNS server)", + default="SAMBA_INTERNAL"), + Option("-v", "--verbose", help="Be verbose", action="store_true") +] diff --git a/python/samba/netcmd/domain/dcpromo.py b/python/samba/netcmd/domain/dcpromo.py new file mode 100644 index 0000000..bf78b74 --- /dev/null +++ b/python/samba/netcmd/domain/dcpromo.py @@ -0,0 +1,90 @@ +# domain management - domain dcpromo +# +# Copyright Matthias Dieter Wallnoefer 2009 +# Copyright Andrew Kroeger 2009 +# Copyright Jelmer Vernooij 2007-2012 +# Copyright Giampaolo Lauria 2011 +# Copyright Matthieu Patou 2011 +# Copyright Andrew Bartlett 2008-2015 +# Copyright Stefan Metzmacher 2012 +# +# This program is free software; you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation; either version 3 of the License, or +# (at your option) any later version. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with this program. If not, see <http://www.gnu.org/licenses/>.
+# + +import samba +import samba.getopt as options +from samba.join import join_DC, join_RODC +from samba.net import Net +from samba.netcmd import Command, CommandError + +from .common import (common_join_options, common_ntvfs_options, + common_provision_join_options) + + +class cmd_domain_dcpromo(Command): + """Promote an existing domain member or NT4 PDC to an AD DC.""" + + synopsis = "%prog [DC|RODC] [options]" + + takes_optiongroups = { + "sambaopts": options.SambaOptions, + "versionopts": options.VersionOptions, + "credopts": options.CredentialsOptions, + } + + takes_options = [] + takes_options.extend(common_join_options) + + takes_options.extend(common_provision_join_options) + + if samba.is_ntvfs_fileserver_built(): + takes_options.extend(common_ntvfs_options) + + takes_args = ["domain", "role?"] + + def run(self, domain, role=None, sambaopts=None, credopts=None, + versionopts=None, server=None, site=None, targetdir=None, + domain_critical_only=False, machinepass=None, + use_ntvfs=False, dns_backend=None, + quiet=False, verbose=False, plaintext_secrets=False, + backend_store=None, backend_store_size=None): + lp = sambaopts.get_loadparm() + creds = credopts.get_credentials(lp) + + logger = self.get_logger(verbose=verbose, quiet=quiet) + + netbios_name = lp.get("netbios name") + + if role is not None: + role = role.upper() + + if role == "DC": + join_DC(logger=logger, server=server, creds=creds, lp=lp, domain=domain, + site=site, netbios_name=netbios_name, targetdir=targetdir, + domain_critical_only=domain_critical_only, + machinepass=machinepass, use_ntvfs=use_ntvfs, + dns_backend=dns_backend, + promote_existing=True, plaintext_secrets=plaintext_secrets, + backend_store=backend_store, + backend_store_size=backend_store_size) + elif role == "RODC": + join_RODC(logger=logger, server=server, creds=creds, lp=lp, domain=domain, + site=site, netbios_name=netbios_name, targetdir=targetdir, + domain_critical_only=domain_critical_only, + machinepass=machinepass, use_ntvfs=use_ntvfs, dns_backend=dns_backend, + promote_existing=True, plaintext_secrets=plaintext_secrets, + backend_store=backend_store, + backend_store_size=backend_store_size) + else: + raise CommandError("Invalid role '%s' (possible values: DC, RODC)" % role) diff --git a/python/samba/netcmd/domain/demote.py b/python/samba/netcmd/domain/demote.py new file mode 100644 index 0000000..ae4d11d --- /dev/null +++ b/python/samba/netcmd/domain/demote.py @@ -0,0 +1,335 @@ +# domain management - domain demote +# +# Copyright Matthias Dieter Wallnoefer 2009 +# Copyright Andrew Kroeger 2009 +# Copyright Jelmer Vernooij 2007-2012 +# Copyright Giampaolo Lauria 2011 +# Copyright Matthieu Patou 2011 +# Copyright Andrew Bartlett 2008-2015 +# Copyright Stefan Metzmacher 2012 +# +# This program is free software; you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation; either version 3 of the License, or +# (at your option) any later version. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with this program. If not, see . 
+# + +import ldb +import samba.getopt as options +from samba import dsdb, remove_dc, werror +from samba.auth import system_session +from samba.dcerpc import drsuapi, misc +from samba.drs_utils import drsuapi_connect +from samba.dsdb import ( + DS_NTDSDSA_OPT_DISABLE_INBOUND_REPL, + DS_NTDSDSA_OPT_DISABLE_OUTBOUND_REPL, + UF_PARTIAL_SECRETS_ACCOUNT, + UF_SERVER_TRUST_ACCOUNT, + UF_TRUSTED_FOR_DELEGATION, + UF_WORKSTATION_TRUST_ACCOUNT +) +from samba.net import Net +from samba.netcmd import Command, CommandError, Option +from samba.samdb import SamDB + + +class cmd_domain_demote(Command): + """Demote ourselves from the role of Domain Controller.""" + + synopsis = "%prog [options]" + + takes_options = [ + Option("--server", help="writable DC to write demotion changes on", type=str), + Option("-H", "--URL", help="LDB URL for database or target server", type=str, + metavar="URL", dest="H"), + Option("--remove-other-dead-server", help="Dead DC (name or NTDS GUID) " + "to remove ALL references to (rather than this DC)", type=str), + Option("-q", "--quiet", help="Be quiet", action="store_true"), + Option("-v", "--verbose", help="Be verbose", action="store_true"), + ] + + takes_optiongroups = { + "sambaopts": options.SambaOptions, + "credopts": options.CredentialsOptions, + "versionopts": options.VersionOptions, + } + + def run(self, sambaopts=None, credopts=None, + versionopts=None, server=None, + remove_other_dead_server=None, H=None, + verbose=False, quiet=False): + lp = sambaopts.get_loadparm() + creds = credopts.get_credentials(lp) + + logger = self.get_logger(verbose=verbose, quiet=quiet) + + if remove_other_dead_server is not None: + if server is not None: + samdb = SamDB(url="ldap://%s" % server, + session_info=system_session(), + credentials=creds, lp=lp) + else: + samdb = SamDB(url=H, session_info=system_session(), credentials=creds, lp=lp) + try: + remove_dc.remove_dc(samdb, logger, remove_other_dead_server) + except remove_dc.DemoteException as err: + raise CommandError("Demote failed: %s" % err) + return + + netbios_name = lp.get("netbios name") + samdb = SamDB(url=H, session_info=system_session(), credentials=creds, lp=lp) + if not server: + res = samdb.search(expression='(&(objectClass=computer)(serverReferenceBL=*))', attrs=["dnsHostName", "name"]) + if (len(res) == 0): + raise CommandError("Unable to search for servers") + + if (len(res) == 1): + raise CommandError("You are the last server in the domain") + + server = None + for e in res: + if str(e["name"]).lower() != netbios_name.lower(): + server = e["dnsHostName"] + break + + ntds_guid = samdb.get_ntds_GUID() + msg = samdb.search(base=str(samdb.get_config_basedn()), + scope=ldb.SCOPE_SUBTREE, expression="(objectGUID=%s)" % ntds_guid, + attrs=['options']) + if len(msg) == 0 or "options" not in msg[0]: + raise CommandError("Failed to find options on %s" % ntds_guid) + + ntds_dn = msg[0].dn + dsa_options = int(str(msg[0]['options'])) + + res = samdb.search(expression="(fSMORoleOwner=%s)" % str(ntds_dn), + controls=["search_options:1:2"]) + + if len(res) != 0: + raise CommandError("Current DC is still the owner of %d role(s), " + "use the role command to transfer roles to " + "another DC" % + len(res)) + + self.errf.write("Using %s as partner server for the demotion\n" % + server) + (drsuapiBind, drsuapi_handle, supportedExtensions) = drsuapi_connect(server, lp, creds) + + self.errf.write("Deactivating inbound replication\n") + + nmsg = ldb.Message() + nmsg.dn = msg[0].dn + + if not (dsa_options & 
DS_NTDSDSA_OPT_DISABLE_OUTBOUND_REPL) and not samdb.am_rodc(): + dsa_options |= DS_NTDSDSA_OPT_DISABLE_INBOUND_REPL + nmsg["options"] = ldb.MessageElement(str(dsa_options), ldb.FLAG_MOD_REPLACE, "options") + samdb.modify(nmsg) + + self.errf.write("Asking partner server %s to synchronize from us\n" + % server) + for part in (samdb.get_schema_basedn(), + samdb.get_config_basedn(), + samdb.get_root_basedn()): + nc = drsuapi.DsReplicaObjectIdentifier() + nc.dn = str(part) + + req1 = drsuapi.DsReplicaSyncRequest1() + req1.naming_context = nc + req1.options = drsuapi.DRSUAPI_DRS_WRIT_REP + req1.source_dsa_guid = misc.GUID(ntds_guid) + + try: + drsuapiBind.DsReplicaSync(drsuapi_handle, 1, req1) + except RuntimeError as e1: + (werr, string) = e1.args + if werr == werror.WERR_DS_DRA_NO_REPLICA: + pass + else: + self.errf.write( + "Error while replicating out last local changes from '%s' for demotion, " + "re-enabling inbound replication\n" % part) + dsa_options ^= DS_NTDSDSA_OPT_DISABLE_INBOUND_REPL + nmsg["options"] = ldb.MessageElement(str(dsa_options), ldb.FLAG_MOD_REPLACE, "options") + samdb.modify(nmsg) + raise CommandError("Error while sending a DsReplicaSync for partition '%s'" % str(part), string) + try: + remote_samdb = SamDB(url="ldap://%s" % server, + session_info=system_session(), + credentials=creds, lp=lp) + + self.errf.write("Changing userControl and container\n") + res = remote_samdb.search(base=str(remote_samdb.domain_dn()), + expression="(&(objectClass=user)(sAMAccountName=%s$))" % + netbios_name.upper(), + attrs=["userAccountControl"]) + dc_dn = res[0].dn + uac = int(str(res[0]["userAccountControl"])) + + except Exception as e: + if not (dsa_options & DS_NTDSDSA_OPT_DISABLE_OUTBOUND_REPL) and not samdb.am_rodc(): + self.errf.write( + "Error while demoting, re-enabling inbound replication\n") + dsa_options ^= DS_NTDSDSA_OPT_DISABLE_INBOUND_REPL + nmsg["options"] = ldb.MessageElement(str(dsa_options), ldb.FLAG_MOD_REPLACE, "options") + samdb.modify(nmsg) + raise CommandError("Error while changing account control", e) + + if (len(res) != 1): + if not (dsa_options & DS_NTDSDSA_OPT_DISABLE_OUTBOUND_REPL) and not samdb.am_rodc(): + self.errf.write( + "Error while demoting, re-enabling inbound replication\n") + dsa_options ^= DS_NTDSDSA_OPT_DISABLE_INBOUND_REPL + nmsg["options"] = ldb.MessageElement(str(dsa_options), ldb.FLAG_MOD_REPLACE, "options") + samdb.modify(nmsg) + raise CommandError("Unable to find object with samaccountName = %s$" + " in the remote dc" % netbios_name.upper()) + + uac &= ~(UF_SERVER_TRUST_ACCOUNT | + UF_TRUSTED_FOR_DELEGATION | + UF_PARTIAL_SECRETS_ACCOUNT) + uac |= UF_WORKSTATION_TRUST_ACCOUNT + + msg = ldb.Message() + msg.dn = dc_dn + + msg["userAccountControl"] = ldb.MessageElement("%d" % uac, + ldb.FLAG_MOD_REPLACE, + "userAccountControl") + try: + remote_samdb.modify(msg) + except Exception as e: + if not (dsa_options & DS_NTDSDSA_OPT_DISABLE_OUTBOUND_REPL) and not samdb.am_rodc(): + self.errf.write( + "Error while demoting, re-enabling inbound replication\n") + dsa_options ^= DS_NTDSDSA_OPT_DISABLE_INBOUND_REPL + nmsg["options"] = ldb.MessageElement(str(dsa_options), ldb.FLAG_MOD_REPLACE, "options") + samdb.modify(nmsg) + + raise CommandError("Error while changing account control", e) + + dc_name = res[0].dn.get_rdn_value() + rdn = "CN=%s" % dc_name + + # Let's move to the Computer container + i = 0 + newrdn = str(rdn) + + computer_dn = remote_samdb.get_wellknown_dn( + remote_samdb.get_default_basedn(), + dsdb.DS_GUID_COMPUTERS_CONTAINER) + res = 
remote_samdb.search(base=computer_dn, expression=rdn, scope=ldb.SCOPE_ONELEVEL) + + if (len(res) != 0): + res = remote_samdb.search(base=computer_dn, expression="%s-%d" % (rdn, i), + scope=ldb.SCOPE_ONELEVEL) + while(len(res) != 0 and i < 100): + i = i + 1 + res = remote_samdb.search(base=computer_dn, expression="%s-%d" % (rdn, i), + scope=ldb.SCOPE_ONELEVEL) + + if i == 100: + if not (dsa_options & DS_NTDSDSA_OPT_DISABLE_OUTBOUND_REPL) and not samdb.am_rodc(): + self.errf.write( + "Error while demoting, re-enabling inbound replication\n") + dsa_options ^= DS_NTDSDSA_OPT_DISABLE_INBOUND_REPL + nmsg["options"] = ldb.MessageElement(str(dsa_options), ldb.FLAG_MOD_REPLACE, "options") + samdb.modify(nmsg) + + msg = ldb.Message() + msg.dn = dc_dn + + msg["userAccountControl"] = ldb.MessageElement("%d" % uac, + ldb.FLAG_MOD_REPLACE, + "userAccountControl") + + remote_samdb.modify(msg) + + raise CommandError("Unable to find a slot for renaming %s," + " all names from %s-1 to %s-%d seemed used" % + (str(dc_dn), rdn, rdn, i - 9)) + + newrdn = "%s-%d" % (rdn, i) + + try: + newdn = ldb.Dn(remote_samdb, "%s,%s" % (newrdn, str(computer_dn))) + remote_samdb.rename(dc_dn, newdn) + except Exception as e: + if not (dsa_options & DS_NTDSDSA_OPT_DISABLE_OUTBOUND_REPL) and not samdb.am_rodc(): + self.errf.write( + "Error while demoting, re-enabling inbound replication\n") + dsa_options ^= DS_NTDSDSA_OPT_DISABLE_INBOUND_REPL + nmsg["options"] = ldb.MessageElement(str(dsa_options), ldb.FLAG_MOD_REPLACE, "options") + samdb.modify(nmsg) + + msg = ldb.Message() + msg.dn = dc_dn + + msg["userAccountControl"] = ldb.MessageElement("%d" % uac, + ldb.FLAG_MOD_REPLACE, + "userAccountControl") + + remote_samdb.modify(msg) + raise CommandError("Error while renaming %s to %s" % (str(dc_dn), str(newdn)), e) + + server_dsa_dn = samdb.get_serverName() + domain = remote_samdb.get_root_basedn() + + try: + req1 = drsuapi.DsRemoveDSServerRequest1() + req1.server_dn = str(server_dsa_dn) + req1.domain_dn = str(domain) + req1.commit = 1 + + drsuapiBind.DsRemoveDSServer(drsuapi_handle, 1, req1) + except RuntimeError as e3: + (werr, string) = e3.args + if not (dsa_options & DS_NTDSDSA_OPT_DISABLE_OUTBOUND_REPL) and not samdb.am_rodc(): + self.errf.write( + "Error while demoting, re-enabling inbound replication\n") + dsa_options ^= DS_NTDSDSA_OPT_DISABLE_INBOUND_REPL + nmsg["options"] = ldb.MessageElement(str(dsa_options), ldb.FLAG_MOD_REPLACE, "options") + samdb.modify(nmsg) + + msg = ldb.Message() + msg.dn = newdn + + msg["userAccountControl"] = ldb.MessageElement("%d" % uac, + ldb.FLAG_MOD_REPLACE, + "userAccountControl") + remote_samdb.modify(msg) + remote_samdb.rename(newdn, dc_dn) + if werr == werror.WERR_DS_DRA_NO_REPLICA: + raise CommandError("The DC %s is not present on (already " + "removed from) the remote server: %s" % + (server_dsa_dn, e3)) + else: + raise CommandError("Error while sending a removeDsServer " + "of %s: %s" % + (server_dsa_dn, e3)) + + remove_dc.remove_sysvol_references(remote_samdb, logger, dc_name) + + # These are objects under the computer account that should be deleted + for s in ("CN=Enterprise,CN=NTFRS Subscriptions", + "CN=%s, CN=NTFRS Subscriptions" % lp.get("realm"), + "CN=Domain system Volumes (SYSVOL Share), CN=NTFRS Subscriptions", + "CN=NTFRS Subscriptions"): + try: + remote_samdb.delete(ldb.Dn(remote_samdb, + "%s,%s" % (s, str(newdn)))) + except ldb.LdbError: + pass + + # get dns host name for target server to demote, remove dns references + remove_dc.remove_dns_references(remote_samdb, logger, 
samdb.host_dns_name(), + ignore_no_name=True) + + self.errf.write("Demote successful\n") diff --git a/python/samba/netcmd/domain/functional_prep.py b/python/samba/netcmd/domain/functional_prep.py new file mode 100644 index 0000000..3e1d4e1 --- /dev/null +++ b/python/samba/netcmd/domain/functional_prep.py @@ -0,0 +1,145 @@ +# domain management - domain functional_prep +# +# Copyright Matthias Dieter Wallnoefer 2009 +# Copyright Andrew Kroeger 2009 +# Copyright Jelmer Vernooij 2007-2012 +# Copyright Giampaolo Lauria 2011 +# Copyright Matthieu Patou 2011 +# Copyright Andrew Bartlett 2008-2015 +# Copyright Stefan Metzmacher 2012 +# +# This program is free software; you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation; either version 3 of the License, or +# (at your option) any later version. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with this program. If not, see . +# + +import ldb +import samba.getopt as options +from samba.auth import system_session +from samba.dsdb import DS_DOMAIN_FUNCTION_2008, DS_DOMAIN_FUNCTION_2008_R2 +from samba.netcmd import Command, CommandError, Option +from samba.netcmd.fsmo import get_fsmo_roleowner +from samba.samdb import SamDB + +from samba import functional_level + + +class cmd_domain_functional_prep(Command): + """Domain functional level preparation""" + + synopsis = "%prog [options]" + + takes_optiongroups = { + "sambaopts": options.SambaOptions, + "versionopts": options.VersionOptions, + "credopts": options.CredentialsOptions, + } + + takes_options = [ + Option("-H", "--URL", help="LDB URL for database or target server", type=str, + metavar="URL", dest="H"), + Option("-q", "--quiet", help="Be quiet", action="store_true"), + Option("-v", "--verbose", help="Be verbose", action="store_true"), + Option("--function-level", type="choice", metavar="FUNCTION_LEVEL", + choices=["2008_R2", "2012", "2012_R2", "2016"], + help="The functional level to prepare for. 
Default is (Windows) 2016.", + default="2016"), + Option("--forest-prep", action="store_true", + help="Run the forest prep (by default, both the domain and forest prep are run)."), + Option("--domain-prep", action="store_true", + help="Run the domain prep (by default, both the domain and forest prep are run).") + ] + + def run(self, **kwargs): + updates_allowed_overridden = False + sambaopts = kwargs.get("sambaopts") + credopts = kwargs.get("credopts") + lp = sambaopts.get_loadparm() + creds = credopts.get_credentials(lp) + H = kwargs.get("H") + function_level = kwargs.get("function_level") + try: + target_level = functional_level.string_to_level(function_level) + except KeyError: + raise CommandError(f"'{function_level}' is not known to Samba as an AD functional level") + + forest_prep = kwargs.get("forest_prep") + domain_prep = kwargs.get("domain_prep") + + samdb = SamDB(url=H, session_info=system_session(), credentials=creds, lp=lp) + + # we're not going to get far if the config doesn't allow schema updates + if lp.get("dsdb:schema update allowed") is None: + lp.set("dsdb:schema update allowed", "yes") + print("Temporarily overriding 'dsdb:schema update allowed' setting") + updates_allowed_overridden = True + + if forest_prep is None and domain_prep is None: + forest_prep = True + domain_prep = True + + own_dn = ldb.Dn(samdb, samdb.get_dsServiceName()) + if forest_prep: + master = get_fsmo_roleowner(samdb, str(samdb.get_schema_basedn()), + 'schema') + if own_dn != master: + raise CommandError("This server is not the schema master.") + + if domain_prep: + domain_dn = samdb.domain_dn() + infrastructure_dn = "CN=Infrastructure," + domain_dn + master = get_fsmo_roleowner(samdb, infrastructure_dn, + 'infrastructure') + if own_dn != master: + raise CommandError("This server is not the infrastructure master.") + + exception_encountered = None + + if forest_prep and exception_encountered is None: + samdb.transaction_start() + try: + from samba.forest_update import ForestUpdate + forest = ForestUpdate(samdb, fix=True) + + forest.check_updates_iterator([11, 54, 79, 80, 81, 82, 83]) + forest.check_updates_functional_level(target_level, + DS_DOMAIN_FUNCTION_2008_R2, + update_revision=True) + + samdb.transaction_commit() + except Exception as e: + print("Exception: %s" % e) + samdb.transaction_cancel() + exception_encountered = e + + if domain_prep and exception_encountered is None: + samdb.transaction_start() + try: + from samba.domain_update import DomainUpdate + + domain = DomainUpdate(samdb, fix=True) + domain.check_updates_functional_level(target_level, + DS_DOMAIN_FUNCTION_2008, + update_revision=True) + + samdb.transaction_commit() + except Exception as e: + print("Exception: %s" % e) + samdb.transaction_cancel() + exception_encountered = e + + if updates_allowed_overridden: + lp.set("dsdb:schema update allowed", "no") + + if exception_encountered is not None: + raise CommandError('Failed to perform functional prep: %r' % + exception_encountered) diff --git a/python/samba/netcmd/domain/info.py b/python/samba/netcmd/domain/info.py new file mode 100644 index 0000000..8454cb3 --- /dev/null +++ b/python/samba/netcmd/domain/info.py @@ -0,0 +1,58 @@ +# domain management - domain info +# +# Copyright Matthias Dieter Wallnoefer 2009 +# Copyright Andrew Kroeger 2009 +# Copyright Jelmer Vernooij 2007-2012 +# Copyright Giampaolo Lauria 2011 +# Copyright Matthieu Patou 2011 +# Copyright Andrew Bartlett 2008-2015 +# Copyright Stefan Metzmacher 2012 +# +# This program is free software; you can redistribute 
it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation; either version 3 of the License, or +# (at your option) any later version. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with this program. If not, see . +# + +import samba.getopt as options +from samba.netcmd import Command, CommandError +from samba.netcmd.common import netcmd_get_domain_infos_via_cldap + + +class cmd_domain_info(Command): + """Print basic info about a domain and the DC passed as parameter.""" + + synopsis = "%prog [options]" + + takes_options = [ + ] + + takes_optiongroups = { + "sambaopts": options.SambaOptions, + "credopts": options.CredentialsOptions, + "versionopts": options.VersionOptions, + } + + takes_args = ["address"] + + def run(self, address, credopts=None, sambaopts=None, versionopts=None): + lp = sambaopts.get_loadparm() + try: + res = netcmd_get_domain_infos_via_cldap(lp, None, address) + except RuntimeError: + raise CommandError("Invalid IP address '" + address + "'!") + self.outf.write("Forest : %s\n" % res.forest) + self.outf.write("Domain : %s\n" % res.dns_domain) + self.outf.write("Netbios domain : %s\n" % res.domain_name) + self.outf.write("DC name : %s\n" % res.pdc_dns_name) + self.outf.write("DC netbios name : %s\n" % res.pdc_name) + self.outf.write("Server site : %s\n" % res.server_site) + self.outf.write("Client site : %s\n" % res.client_site) diff --git a/python/samba/netcmd/domain/join.py b/python/samba/netcmd/domain/join.py new file mode 100644 index 0000000..936cfa8 --- /dev/null +++ b/python/samba/netcmd/domain/join.py @@ -0,0 +1,146 @@ +# domain management - domain join +# +# Copyright Matthias Dieter Wallnoefer 2009 +# Copyright Andrew Kroeger 2009 +# Copyright Jelmer Vernooij 2007-2012 +# Copyright Giampaolo Lauria 2011 +# Copyright Matthieu Patou 2011 +# Copyright Andrew Bartlett 2008-2015 +# Copyright Stefan Metzmacher 2012 +# +# This program is free software; you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation; either version 3 of the License, or +# (at your option) any later version. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with this program. If not, see . 
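The info command above boils down to a single CLDAP netlogon query. The following is a rough sketch of the same helper used outside the command framework; the smb.conf discovery and the target address are illustrative assumptions, not part of this patch.

    # Illustrative only: query a DC the way `samba-tool domain info` does.
    from samba.param import LoadParm
    from samba.netcmd.common import netcmd_get_domain_infos_via_cldap

    lp = LoadParm()
    lp.load_default()          # assumes a readable default smb.conf

    # 192.0.2.10 is a placeholder documentation address.
    res = netcmd_get_domain_infos_via_cldap(lp, None, "192.0.2.10")
    print("Forest :", res.forest)
    print("Domain :", res.dns_domain)
    print("DC name:", res.pdc_dns_name)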
+# + +import os +import tempfile + +import samba +import samba.getopt as options +from samba import is_ad_dc_built +from samba.dcerpc import nbt +from samba.join import join_DC, join_RODC +from samba.net import LIBNET_JOIN_AUTOMATIC, Net +from samba.net_s3 import Net as s3_Net +from samba.netcmd import Command, CommandError, Option +from samba.param import default_path +from samba.samba3 import param as s3param + +from .common import common_join_options, common_provision_join_options + + +class cmd_domain_join(Command): + """Join domain as either member or backup domain controller.""" + + synopsis = "%prog [DC|RODC|MEMBER] [options]" + + takes_optiongroups = { + "sambaopts": options.SambaOptions, + "versionopts": options.VersionOptions, + "credopts": options.CredentialsOptions, + } + + ntvfs_options = [ + Option( + "--use-ntvfs", help="Use NTVFS for the fileserver (default = no)", + action="store_true") + ] + + selftest_options = [ + Option("--experimental-s4-member", action="store_true", + help="Perform member joins using the s4 Net join_member. " + "Don't choose this unless you know what you're doing") + ] + + takes_options = [ + Option("--no-dns-updates", action="store_true", + help="Disable DNS updates") + ] + takes_options.extend(common_join_options) + takes_options.extend(common_provision_join_options) + + if samba.is_ntvfs_fileserver_built(): + takes_options.extend(ntvfs_options) + + if samba.is_selftest_enabled(): + takes_options.extend(selftest_options) + + takes_args = ["domain", "role?"] + + def run(self, domain, role=None, sambaopts=None, credopts=None, + versionopts=None, server=None, site=None, targetdir=None, + domain_critical_only=False, machinepass=None, + use_ntvfs=False, experimental_s4_member=False, dns_backend=None, + quiet=False, verbose=False, no_dns_updates=False, + plaintext_secrets=False, + backend_store=None, backend_store_size=None): + lp = sambaopts.get_loadparm() + creds = credopts.get_credentials(lp) + net = Net(creds, lp, server=credopts.ipaddress) + + logger = self.get_logger(verbose=verbose, quiet=quiet) + + netbios_name = lp.get("netbios name") + + if role is not None: + role = role.upper() + + if role is None or role == "MEMBER": + if experimental_s4_member: + (join_password, sid, domain_name) = net.join_member( + domain, netbios_name, LIBNET_JOIN_AUTOMATIC, + machinepass=machinepass) + else: + lp.set('realm', domain) + if lp.get('workgroup') == 'WORKGROUP': + lp.set('workgroup', net.finddc(domain=domain, + flags=(nbt.NBT_SERVER_LDAP | + nbt.NBT_SERVER_DS)).domain_name) + lp.set('server role', 'member server') + smb_conf = lp.configfile if lp.configfile else default_path() + with tempfile.NamedTemporaryFile(delete=False, + dir=os.path.dirname(smb_conf)) as f: + lp.dump(False, f.name) + if os.path.exists(smb_conf): + mode = os.stat(smb_conf).st_mode + os.chmod(f.name, mode) + os.rename(f.name, smb_conf) + s3_lp = s3param.get_context() + s3_lp.load(smb_conf) + s3_net = s3_Net(creds, s3_lp, server=server) + (sid, domain_name) = s3_net.join_member(netbios_name, + machinepass=machinepass, + debug=verbose, + noDnsUpdates=no_dns_updates) + + self.errf.write("Joined domain %s (%s)\n" % (domain_name, sid)) + elif role == "DC" and is_ad_dc_built(): + join_DC(logger=logger, server=server, creds=creds, lp=lp, domain=domain, + site=site, netbios_name=netbios_name, targetdir=targetdir, + domain_critical_only=domain_critical_only, + machinepass=machinepass, use_ntvfs=use_ntvfs, + dns_backend=dns_backend, + plaintext_secrets=plaintext_secrets, + 
backend_store=backend_store, + backend_store_size=backend_store_size) + elif role == "RODC" and is_ad_dc_built(): + join_RODC(logger=logger, server=server, creds=creds, lp=lp, domain=domain, + site=site, netbios_name=netbios_name, targetdir=targetdir, + domain_critical_only=domain_critical_only, + machinepass=machinepass, use_ntvfs=use_ntvfs, + dns_backend=dns_backend, + plaintext_secrets=plaintext_secrets, + backend_store=backend_store, + backend_store_size=backend_store_size) + else: + raise CommandError("Invalid role '%s' (possible values: MEMBER, DC, RODC)" % role) diff --git a/python/samba/netcmd/domain/keytab.py b/python/samba/netcmd/domain/keytab.py new file mode 100644 index 0000000..b0955ca --- /dev/null +++ b/python/samba/netcmd/domain/keytab.py @@ -0,0 +1,55 @@ +# domain management - domain keytab +# +# Copyright Matthias Dieter Wallnoefer 2009 +# Copyright Andrew Kroeger 2009 +# Copyright Jelmer Vernooij 2007-2012 +# Copyright Giampaolo Lauria 2011 +# Copyright Matthieu Patou 2011 +# Copyright Andrew Bartlett 2008-2015 +# Copyright Stefan Metzmacher 2012 +# +# This program is free software; you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation; either version 3 of the License, or +# (at your option) any later version. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with this program. If not, see . +# + +import samba.getopt as options +from samba import enable_net_export_keytab +from samba.net import Net +from samba.netcmd import Command, Option + +try: + enable_net_export_keytab() +except ImportError: + cmd_domain_export_keytab = None +else: + class cmd_domain_export_keytab(Command): + """Dump Kerberos keys of the domain into a keytab.""" + + synopsis = "%prog [options]" + + takes_optiongroups = { + "sambaopts": options.SambaOptions, + "credopts": options.CredentialsOptions, + "versionopts": options.VersionOptions, + } + + takes_options = [ + Option("--principal", help="extract only this principal", type=str), + ] + + takes_args = ["keytab"] + + def run(self, keytab, credopts=None, sambaopts=None, versionopts=None, principal=None): + lp = sambaopts.get_loadparm() + net = Net(None, lp) + net.export_keytab(keytab=keytab, principal=principal) diff --git a/python/samba/netcmd/domain/leave.py b/python/samba/netcmd/domain/leave.py new file mode 100644 index 0000000..0d58360 --- /dev/null +++ b/python/samba/netcmd/domain/leave.py @@ -0,0 +1,59 @@ +# domain management - domain leave +# +# Copyright Matthias Dieter Wallnoefer 2009 +# Copyright Andrew Kroeger 2009 +# Copyright Jelmer Vernooij 2007-2012 +# Copyright Giampaolo Lauria 2011 +# Copyright Matthieu Patou 2011 +# Copyright Andrew Bartlett 2008-2015 +# Copyright Stefan Metzmacher 2012 +# +# This program is free software; you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation; either version 3 of the License, or +# (at your option) any later version. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
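One detail of the member join above deserves a note: the updated configuration is written to a temporary file next to smb.conf and then renamed over it, so an interrupted join can never leave a half-written config behind. A minimal sketch of that pattern, with the helper name and the standalone shape being illustrative rather than taken from the patch:

    # Sketch of the atomic config-rewrite pattern used by the member join.
    import os
    import tempfile

    def rewrite_smb_conf(lp, smb_conf):
        # Write the new config beside the old one (same filesystem), so the
        # final rename below is atomic.
        with tempfile.NamedTemporaryFile(delete=False,
                                         dir=os.path.dirname(smb_conf)) as f:
            lp.dump(False, f.name)
        # Preserve the permissions of the existing file, if any.
        if os.path.exists(smb_conf):
            os.chmod(f.name, os.stat(smb_conf).st_mode)
        os.rename(f.name, smb_conf)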
See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with this program. If not, see . +# + +import samba.getopt as options +from samba.net_s3 import Net as s3_Net +from samba.netcmd import Command, Option +from samba.param import default_path +from samba.samba3 import param as s3param + + +class cmd_domain_leave(Command): + """Cause a domain member to leave the joined domain.""" + + synopsis = "%prog [options]" + + takes_optiongroups = { + "sambaopts": options.SambaOptions, + "versionopts": options.VersionOptions, + "credopts": options.CredentialsOptions, + } + + takes_options = [ + Option("--keep-account", action="store_true", + help="Disable the machine account instead of deleting it.") + ] + + takes_args = [] + + def run(self, sambaopts=None, credopts=None, versionopts=None, + keep_account=False): + lp = sambaopts.get_loadparm() + creds = credopts.get_credentials(lp) + + s3_lp = s3param.get_context() + smb_conf = lp.configfile if lp.configfile else default_path() + s3_lp.load(smb_conf) + s3_net = s3_Net(creds, s3_lp) + s3_net.leave(keep_account) diff --git a/python/samba/netcmd/domain/level.py b/python/samba/netcmd/domain/level.py new file mode 100644 index 0000000..eefe360 --- /dev/null +++ b/python/samba/netcmd/domain/level.py @@ -0,0 +1,250 @@ +# domain management - domain level +# +# Copyright Matthias Dieter Wallnoefer 2009 +# Copyright Andrew Kroeger 2009 +# Copyright Jelmer Vernooij 2007-2012 +# Copyright Giampaolo Lauria 2011 +# Copyright Matthieu Patou 2011 +# Copyright Andrew Bartlett 2008-2015 +# Copyright Stefan Metzmacher 2012 +# +# This program is free software; you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation; either version 3 of the License, or +# (at your option) any later version. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with this program. If not, see . 
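The conditional class definition in keytab.py above means the export command simply disappears on builds without keytab support. A usage sketch under that assumption; the keytab path and principal are placeholders:

    # Illustrative only: export keys as cmd_domain_export_keytab does.
    from samba import enable_net_export_keytab
    from samba.net import Net
    from samba.param import LoadParm

    enable_net_export_keytab()   # raises ImportError on builds without support

    lp = LoadParm()
    lp.load_default()
    net = Net(None, lp)
    net.export_keytab(keytab="/tmp/example.keytab",
                      principal="host/dc1@EXAMPLE.COM")  # principal is optional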
+# + +import ldb +import samba.getopt as options +from samba.auth import system_session +from samba.dsdb import check_and_update_fl, DS_DOMAIN_FUNCTION_2000 +from samba.netcmd import Command, CommandError, Option +from samba.samdb import SamDB + +from samba import functional_level + + +class cmd_domain_level(Command): + """Raise domain and forest function levels.""" + + synopsis = "%prog (show|raise <options>) [options]" + + takes_optiongroups = { + "sambaopts": options.SambaOptions, + "credopts": options.CredentialsOptions, + "versionopts": options.VersionOptions, + } + + takes_options = [ + Option("-H", "--URL", help="LDB URL for database or target server", type=str, + metavar="URL", dest="H"), + Option("-q", "--quiet", help="Be quiet", action="store_true"), # unused + Option("--forest-level", type="choice", choices=["2003", "2008", "2008_R2", "2012", "2012_R2", "2016"], + help="The forest function level (2003 | 2008 | 2008_R2 | 2012 | 2012_R2 | 2016)"), + Option("--domain-level", type="choice", choices=["2003", "2008", "2008_R2", "2012", "2012_R2", "2016"], + help="The domain function level (2003 | 2008 | 2008_R2 | 2012 | 2012_R2 | 2016)") + ] + + takes_args = ["subcommand"] + + def run(self, subcommand, H=None, forest_level=None, domain_level=None, + quiet=False, credopts=None, sambaopts=None, versionopts=None): + if subcommand not in ["show", "raise"]: + raise CommandError("invalid argument: '%s' (choose from 'show', 'raise')" % subcommand) + + lp = sambaopts.get_loadparm() + creds = credopts.get_credentials(lp, fallback_machine=True) + + samdb = SamDB(url=H, session_info=system_session(), + credentials=creds, lp=lp) + + domain_dn = samdb.domain_dn() + + in_transaction = False + if subcommand == "raise" and (H is None or not H.startswith("ldap")): + samdb.transaction_start() + in_transaction = True + try: + check_and_update_fl(samdb, lp) + except Exception as e: + samdb.transaction_cancel() + raise e + + try: + res_forest = samdb.search("CN=Partitions,%s" % samdb.get_config_basedn(), + scope=ldb.SCOPE_BASE, attrs=["msDS-Behavior-Version"]) + assert len(res_forest) == 1 + + res_domain = samdb.search(domain_dn, scope=ldb.SCOPE_BASE, + attrs=["msDS-Behavior-Version", "nTMixedDomain"]) + assert len(res_domain) == 1 + + res_domain_cross = samdb.search("CN=Partitions,%s" % samdb.get_config_basedn(), + scope=ldb.SCOPE_SUBTREE, + expression="(&(objectClass=crossRef)(nCName=%s))" % domain_dn, + attrs=["msDS-Behavior-Version"]) + assert len(res_domain_cross) == 1 + + res_dc_s = samdb.search("CN=Sites,%s" % samdb.get_config_basedn(), + scope=ldb.SCOPE_SUBTREE, expression="(objectClass=nTDSDSA)", + attrs=["msDS-Behavior-Version"]) + assert len(res_dc_s) >= 1 + + # default values, since "msDS-Behavior-Version" does not exist on Windows 2000 AD + level_forest = DS_DOMAIN_FUNCTION_2000 + level_domain = DS_DOMAIN_FUNCTION_2000 + + if "msDS-Behavior-Version" in res_forest[0]: + level_forest = int(res_forest[0]["msDS-Behavior-Version"][0]) + if "msDS-Behavior-Version" in res_domain[0]: + level_domain = int(res_domain[0]["msDS-Behavior-Version"][0]) + level_domain_mixed = int(res_domain[0]["nTMixedDomain"][0]) + + min_level_dc = None + for msg in res_dc_s: + if "msDS-Behavior-Version" in msg: + if min_level_dc is None or int(msg["msDS-Behavior-Version"][0]) < min_level_dc: + min_level_dc = int(msg["msDS-Behavior-Version"][0]) + else: + min_level_dc = DS_DOMAIN_FUNCTION_2000 + # this is the lowest level possible + break + + if level_forest < DS_DOMAIN_FUNCTION_2000 or level_domain < DS_DOMAIN_FUNCTION_2000: + raise CommandError("The domain and/or forest function level is invalid. Correct it or reprovision!") + if min_level_dc < DS_DOMAIN_FUNCTION_2000: + raise CommandError("Lowest function level of a DC is invalid. Correct this or reprovision!") + if level_forest > level_domain: + raise CommandError("Forest function level is higher than the domain level(s). Correct this or reprovision!") + if level_domain > min_level_dc: + raise CommandError("Domain function level is higher than the lowest function level of a DC. Correct this or reprovision!") + except Exception as e: + if in_transaction: + samdb.transaction_cancel() + raise e + + def do_show(): + self.message("Domain and forest function level for domain '%s'" % domain_dn) + if level_forest == DS_DOMAIN_FUNCTION_2000 and level_domain_mixed != 0: + self.message("\nATTENTION: You are running Samba 4 at a forest function level lower than Windows 2000 (Native). This isn't supported! Please raise it!") + if level_domain == DS_DOMAIN_FUNCTION_2000 and level_domain_mixed != 0: + self.message("\nATTENTION: You are running Samba 4 at a domain function level lower than Windows 2000 (Native). This isn't supported! Please raise it!") + if min_level_dc == DS_DOMAIN_FUNCTION_2000 and level_domain_mixed != 0: + self.message("\nATTENTION: The lowest function level of a DC in this domain is lower than Windows 2003. This isn't supported! Please upgrade the affected DC(s)!") + + self.message("") + + outstr = functional_level.level_to_string(level_forest) + self.message("Forest function level: (Windows) " + outstr) + + if level_domain == DS_DOMAIN_FUNCTION_2000 and level_domain_mixed: + outstr = "2000 mixed (NT4 DC support)" + else: + outstr = functional_level.level_to_string(level_domain) + self.message("Domain function level: (Windows) " + outstr) + + outstr = functional_level.level_to_string(min_level_dc) + self.message("Lowest function level of a DC: (Windows) " + outstr) + + def do_raise(): + msgs = [] + + current_level_domain = level_domain + + if domain_level is not None: + try: + new_level_domain = functional_level.string_to_level(domain_level) + except KeyError: + raise CommandError(f"New functional level '{domain_level}' is not known to Samba as an AD functional level") + + if new_level_domain <= level_domain and level_domain_mixed == 0: + raise CommandError("Domain function level can't be smaller than or equal to the actual one!") + if new_level_domain > min_level_dc: + raise CommandError("Domain function level can't be higher than the lowest function level of a DC!") + + # Deactivate mixed/interim domain support + if level_domain_mixed != 0: + # Directly on the base DN + m = ldb.Message() + m.dn = ldb.Dn(samdb, domain_dn) + m["nTMixedDomain"] = ldb.MessageElement("0", + ldb.FLAG_MOD_REPLACE, "nTMixedDomain") + samdb.modify(m) + # Under partitions + m = ldb.Message() + m.dn = res_domain_cross[0].dn + m["nTMixedDomain"] = ldb.MessageElement("0", + ldb.FLAG_MOD_REPLACE, "nTMixedDomain") + try: + samdb.modify(m) + except ldb.LdbError as e: + (enum, emsg) = e.args + if enum != ldb.ERR_UNWILLING_TO_PERFORM: + raise + + # Directly on the base DN + m = ldb.Message() + m.dn = ldb.Dn(samdb, domain_dn) + m["msDS-Behavior-Version"] = ldb.MessageElement( + str(new_level_domain), ldb.FLAG_MOD_REPLACE, + "msDS-Behavior-Version") + samdb.modify(m) + # Under partitions + m = ldb.Message() + m.dn = res_domain_cross[0].dn + m["msDS-Behavior-Version"] = ldb.MessageElement( + str(new_level_domain), ldb.FLAG_MOD_REPLACE, + "msDS-Behavior-Version") + try: + samdb.modify(m) + except 
ldb.LdbError as e2: + (enum, emsg) = e2.args + if enum != ldb.ERR_UNWILLING_TO_PERFORM: + raise + + current_level_domain = new_level_domain + msgs.append("Domain function level changed!") + + if forest_level is not None: + new_level_forest = functional_level.string_to_level(forest_level) + + if new_level_forest <= level_forest: + raise CommandError("Forest function level can't be smaller than or equal to the actual one!") + if new_level_forest > current_level_domain: + raise CommandError("Forest function level can't be higher than the domain function level(s). Please raise it/them first!") + + m = ldb.Message() + m.dn = ldb.Dn(samdb, "CN=Partitions,%s" % samdb.get_config_basedn()) + m["msDS-Behavior-Version"] = ldb.MessageElement( + str(new_level_forest), ldb.FLAG_MOD_REPLACE, + "msDS-Behavior-Version") + samdb.modify(m) + msgs.append("Forest function level changed!") + msgs.append("All changes applied successfully!") + self.message("\n".join(msgs)) + return + + if subcommand == "show": + assert not in_transaction + do_show() + return + elif subcommand == "raise": + try: + do_raise() + except Exception as e: + if in_transaction: + samdb.transaction_cancel() + raise e + if in_transaction: + samdb.transaction_commit() + return + + raise AssertionError("Internal Error subcommand[%s] not handled" % subcommand) diff --git a/python/samba/netcmd/domain/models/__init__.py b/python/samba/netcmd/domain/models/__init__.py new file mode 100644 index 0000000..8a6b254 --- /dev/null +++ b/python/samba/netcmd/domain/models/__init__.py @@ -0,0 +1,32 @@ +# Unix SMB/CIFS implementation. +# +# Samba domain models. +# +# Copyright (C) Catalyst.Net Ltd. 2023 +# +# Written by Rob van der Linde +# +# This program is free software; you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation; either version 3 of the License, or +# (at your option) any later version. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with this program. If not, see . +# + +from .auth_policy import AuthenticationPolicy +from .auth_silo import AuthenticationSilo +from .claim_type import ClaimType +from .group import Group +from .model import MODELS +from .schema import AttributeSchema, ClassSchema +from .site import Site +from .subnet import Subnet +from .user import User +from .value_type import ValueType diff --git a/python/samba/netcmd/domain/models/auth_policy.py b/python/samba/netcmd/domain/models/auth_policy.py new file mode 100644 index 0000000..c56966c --- /dev/null +++ b/python/samba/netcmd/domain/models/auth_policy.py @@ -0,0 +1,109 @@ +# Unix SMB/CIFS implementation. +# +# Authentication policy model. +# +# Copyright (C) Catalyst.Net Ltd. 2023 +# +# Written by Rob van der Linde +# +# This program is free software; you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation; either version 3 of the License, or +# (at your option) any later version. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. 
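The models package __init__ above is what populates the MODELS registry: importing each model triggers ModelMeta (defined later in model.py), which records the class under its name and collects its Field descriptors. A sketch of inspecting that registry, needing no directory connection at all:

    # Illustrative only: every import in models/__init__.py registers a class.
    from samba.netcmd.domain.models import MODELS, AuthenticationPolicy

    print(sorted(MODELS))        # e.g. includes 'AuthenticationPolicy', 'User'

    # Each model carries a map of Python attribute -> LDAP attribute,
    # collected by ModelMeta from the Field descriptors.
    for attr, field in sorted(AuthenticationPolicy.fields.items()):
        print(f"{attr:40s} -> {field.name}")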
+# +# You should have received a copy of the GNU General Public License +# along with this program. If not, see . +# + +from enum import IntEnum +from ldb import Dn + +from .fields import (BooleanField, EnumField, IntegerField, SDDLField, + StringField) +from .model import Model + +# Ticket-Granting-Ticket lifetimes. +MIN_TGT_LIFETIME = 45 +MAX_TGT_LIFETIME = 2147483647 + + +class StrongNTLMPolicy(IntEnum): + DISABLED = 0 + OPTIONAL = 1 + REQUIRED = 2 + + @classmethod + def get_choices(cls): + return sorted([choice.capitalize() for choice in cls._member_names_]) + + @classmethod + def choices_str(cls): + return ", ".join(cls.get_choices()) + + +class AuthenticationPolicy(Model): + description = StringField("description") + enforced = BooleanField("msDS-AuthNPolicyEnforced") + strong_ntlm_policy = EnumField("msDS-StrongNTLMPolicy", StrongNTLMPolicy) + user_allow_ntlm_network_auth = BooleanField( + "msDS-UserAllowedNTLMNetworkAuthentication") + user_tgt_lifetime = IntegerField("msDS-UserTGTLifetime") + service_allow_ntlm_network_auth = BooleanField( + "msDS-ServiceAllowedNTLMNetworkAuthentication") + service_tgt_lifetime = IntegerField("msDS-ServiceTGTLifetime") + computer_tgt_lifetime = IntegerField("msDS-ComputerTGTLifetime") + user_allowed_to_authenticate_from = SDDLField( + "msDS-UserAllowedToAuthenticateFrom", allow_device_in_sddl=False) + user_allowed_to_authenticate_to = SDDLField( + "msDS-UserAllowedToAuthenticateTo") + service_allowed_to_authenticate_from = SDDLField( + "msDS-ServiceAllowedToAuthenticateFrom", allow_device_in_sddl=False) + service_allowed_to_authenticate_to = SDDLField( + "msDS-ServiceAllowedToAuthenticateTo") + computer_allowed_to_authenticate_to = SDDLField( + "msDS-ComputerAllowedToAuthenticateTo") + + @staticmethod + def get_base_dn(ldb): + """Return the base DN for the AuthenticationPolicy model. + + :param ldb: Ldb connection + :return: Dn object of container + """ + base_dn = ldb.get_config_basedn() + base_dn.add_child( + "CN=AuthN Policies,CN=AuthN Policy Configuration,CN=Services") + return base_dn + + @staticmethod + def get_object_class(): + return "msDS-AuthNPolicy" + + @staticmethod + def lookup(ldb, name): + """Helper function to return auth policy or raise LookupError. + + :param ldb: Ldb connection + :param name: Either DN or name of Authentication Policy + :raises: LookupError if not found + :raises: ValueError if name is not set + """ + if not name: + raise ValueError("Attribute 'name' is required.") + + try: + # It's possible name is already a Dn. + dn = name if isinstance(name, Dn) else Dn(ldb, name) + policy = AuthenticationPolicy.get(ldb, dn=dn) + except ValueError: + policy = AuthenticationPolicy.get(ldb, cn=name) + + if policy is None: + raise LookupError(f"Authentication policy {name} not found.") + + return policy diff --git a/python/samba/netcmd/domain/models/auth_silo.py b/python/samba/netcmd/domain/models/auth_silo.py new file mode 100644 index 0000000..9747671 --- /dev/null +++ b/python/samba/netcmd/domain/models/auth_silo.py @@ -0,0 +1,104 @@ +# Unix SMB/CIFS implementation. +# +# Authentication silo model. +# +# Copyright (C) Catalyst.Net Ltd. 2023 +# +# Written by Rob van der Linde +# +# This program is free software; you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation; either version 3 of the License, or +# (at your option) any later version. 
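lookup() above accepts either a DN or a plain CN, attempting the DN parse first and falling back to a CN search when that fails. A sketch of both spellings; the connection boilerplate, sam.ldb path, and policy name are assumptions for illustration:

    # Illustrative only: resolving a policy by CN or DN via lookup().
    from samba.auth import system_session
    from samba.param import LoadParm
    from samba.samdb import SamDB
    from samba.netcmd.domain.models import AuthenticationPolicy

    lp = LoadParm()
    samdb = SamDB(url="/var/lib/samba/private/sam.ldb",
                  session_info=system_session(), lp=lp)

    policy = AuthenticationPolicy.lookup(samdb, "Example Policy")   # by CN
    print(policy.dn, policy.enforced)

    dn = f"CN=Example Policy,{AuthenticationPolicy.get_base_dn(samdb)}"
    policy = AuthenticationPolicy.lookup(samdb, dn)                 # by DN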
+# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with this program. If not, see . +# + +from ldb import FLAG_MOD_ADD, FLAG_MOD_DELETE, LdbError, Message, MessageElement + +from samba.sd_utils import escaped_claim_id + +from .exceptions import GrantMemberError, RevokeMemberError +from .fields import DnField, BooleanField, StringField +from .model import Model + + +class AuthenticationSilo(Model): + description = StringField("description") + enforced = BooleanField("msDS-AuthNPolicySiloEnforced") + user_authentication_policy = DnField("msDS-UserAuthNPolicy") + service_authentication_policy = DnField("msDS-ServiceAuthNPolicy") + computer_authentication_policy = DnField("msDS-ComputerAuthNPolicy") + members = DnField("msDS-AuthNPolicySiloMembers", many=True) + + @staticmethod + def get_base_dn(ldb): + """Return the base DN for the AuthenticationSilo model. + + :param ldb: Ldb connection + :return: Dn object of container + """ + base_dn = ldb.get_config_basedn() + base_dn.add_child( + "CN=AuthN Silos,CN=AuthN Policy Configuration,CN=Services") + return base_dn + + @staticmethod + def get_object_class(): + return "msDS-AuthNPolicySilo" + + def grant(self, ldb, member): + """Grant a member access to the Authentication Silo. + + Rather than saving the silo object and writing the entire member + list out again, just add one member only. + + :param ldb: Ldb connection + :param member: Member to grant access to silo + """ + # Create a message with only an add member operation. + message = Message(dn=self.dn) + message.add(MessageElement(str(member.dn), FLAG_MOD_ADD, + "msDS-AuthNPolicySiloMembers")) + + # Update authentication silo. + try: + ldb.modify(message) + except LdbError as e: + raise GrantMemberError(f"Failed to grant access to silo member: {e}") + + # If the modify operation was successful refresh members field. + self.refresh(ldb, fields=["members"]) + + def revoke(self, ldb, member): + """Revoke a member from the Authentication Silo. + + Rather than saving the silo object and writing the entire member + list out again, just remove one member only. + + :param ldb: Ldb connection + :param member: Member to revoke from silo + """ + # Create a message with only a remove member operation. + message = Message(dn=self.dn) + message.add(MessageElement(str(member.dn), FLAG_MOD_DELETE, + "msDS-AuthNPolicySiloMembers")) + + # Update authentication silo. + try: + ldb.modify(message) + except LdbError as e: + raise RevokeMemberError(f"Failed to revoke silo member: {e}") + + # If the modify operation was successful refresh members field. + self.refresh(ldb, fields=["members"]) + + def get_authentication_sddl(self): + return ('O:SYG:SYD:(XA;OICI;CR;;;WD;(@USER.ad://ext/' + f'AuthenticationSilo == "{escaped_claim_id(self.name)}"))') diff --git a/python/samba/netcmd/domain/models/claim_type.py b/python/samba/netcmd/domain/models/claim_type.py new file mode 100644 index 0000000..7e1c816 --- /dev/null +++ b/python/samba/netcmd/domain/models/claim_type.py @@ -0,0 +1,58 @@ +# Unix SMB/CIFS implementation. +# +# Claim type model. +# +# Copyright (C) Catalyst.Net Ltd. 
2023 +# +# Written by Rob van der Linde +# +# This program is free software; you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation; either version 3 of the License, or +# (at your option) any later version. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with this program. If not, see . +# + +from .fields import BooleanField, DnField, IntegerField,\ + PossibleClaimValuesField, StringField +from .model import Model + + +class ClaimType(Model): + enabled = BooleanField("Enabled") + description = StringField("description") + display_name = StringField("displayName") + claim_attribute_source = DnField("msDS-ClaimAttributeSource") + claim_is_single_valued = BooleanField("msDS-ClaimIsSingleValued") + claim_is_value_space_restricted = BooleanField( + "msDS-ClaimIsValueSpaceRestricted") + claim_possible_values = PossibleClaimValuesField("msDS-ClaimPossibleValues") + claim_source_type = StringField("msDS-ClaimSourceType") + claim_type_applies_to_class = DnField( + "msDS-ClaimTypeAppliesToClass", many=True) + claim_value_type = IntegerField("msDS-ClaimValueType") + + @staticmethod + def get_base_dn(ldb): + """Return the base DN for the ClaimType model. + + :param ldb: Ldb connection + :return: Dn object of container + """ + base_dn = ldb.get_config_basedn() + base_dn.add_child("CN=Claim Types,CN=Claims Configuration,CN=Services") + return base_dn + + @staticmethod + def get_object_class(): + return "msDS-ClaimType" + + def __str__(self): + return str(self.display_name) diff --git a/python/samba/netcmd/domain/models/exceptions.py b/python/samba/netcmd/domain/models/exceptions.py new file mode 100644 index 0000000..14ebd77 --- /dev/null +++ b/python/samba/netcmd/domain/models/exceptions.py @@ -0,0 +1,64 @@ +# Unix SMB/CIFS implementation. +# +# Model and ORM exceptions. +# +# Copyright (C) Catalyst.Net Ltd. 2023 +# +# Written by Rob van der Linde +# +# This program is free software; you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation; either version 3 of the License, or +# (at your option) any later version. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with this program. If not, see . 
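Returning to the silo model two files up: grant() and revoke() deliberately send a single-valued modify instead of rewriting the whole msDS-AuthNPolicySiloMembers list, which avoids clobbering concurrent membership changes. A sketch, with the connection and object names as placeholders:

    # Illustrative only: single-member modifies via grant()/revoke().
    from samba.auth import system_session
    from samba.param import LoadParm
    from samba.samdb import SamDB
    from samba.netcmd.domain.models import AuthenticationSilo, User

    lp = LoadParm()
    samdb = SamDB(url="/var/lib/samba/private/sam.ldb",
                  session_info=system_session(), lp=lp)

    silo = AuthenticationSilo.get(samdb, cn="Example Silo")
    user = User.get(samdb, cn="alice")

    silo.grant(samdb, user)                # FLAG_MOD_ADD of one member value
    print([str(m) for m in silo.members])  # grant() refreshed this field
    silo.revoke(samdb, user)               # FLAG_MOD_DELETE of the same value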
+# + +class ModelError(Exception): + pass + + +class FieldError(ModelError): + """A ModelError on a specific field.""" + + def __init__(self, *args, field=None): + self.field = field + super().__init__(*args) + + def __str__(self): + message = super().__str__() + return f"{self.field.name}: {message}" + + +class MultipleObjectsReturned(ModelError): + pass + + +class DoesNotExist(ModelError): + pass + + +class GrantMemberError(ModelError): + pass + + +class RevokeMemberError(ModelError): + pass + + +class ProtectError(ModelError): + pass + + +class UnprotectError(ModelError): + pass + + +class DeleteError(ModelError): + pass diff --git a/python/samba/netcmd/domain/models/fields.py b/python/samba/netcmd/domain/models/fields.py new file mode 100644 index 0000000..0b7e1eb --- /dev/null +++ b/python/samba/netcmd/domain/models/fields.py @@ -0,0 +1,507 @@ +# Unix SMB/CIFS implementation. +# +# Model fields. +# +# Copyright (C) Catalyst.Net Ltd. 2023 +# +# Written by Rob van der Linde +# +# This program is free software; you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation; either version 3 of the License, or +# (at your option) any later version. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with this program. If not, see . +# + +from enum import IntEnum + +import io +from abc import ABCMeta, abstractmethod +from datetime import datetime +from xml.etree import ElementTree + +from ldb import Dn, MessageElement, string_to_time, timestring +from samba.dcerpc import security +from samba.dcerpc.misc import GUID +from samba.ndr import ndr_pack, ndr_unpack + + +class Field(metaclass=ABCMeta): + """Base class for all fields. + + Each field will need to implement from_db_value and to_db_value. + + A field must correctly support converting both single valued fields, + and list type fields. + + The only thing many=True does is say the field "prefers" to be a list, + but really any field can be a list or single value. + """ + + def __init__(self, name, many=False, default=None, hidden=False, + readonly=False): + """Creates a new field, should be subclassed. + + :param name: Ldb field name. + :param many: If true always convert field to a list when loaded. + :param default: Default value or callback method (obj is first argument) + :param hidden: If this is True, exclude the field when calling as_dict() + :param readonly: If true don't write this value when calling save. + """ + self.name = name + self.many = many + self.hidden = hidden + self.readonly = readonly + + # This ensures that fields with many=True are always lists. + # If this is inconsistent anywhere, it isn't so great to use. + if self.many and default is None: + self.default = [] + else: + self.default = default + + @abstractmethod + def from_db_value(self, ldb, value): + """Converts value read from the database to Python value. + + :param ldb: Ldb connection + :param value: MessageElement value from the database + :returns: Parsed value as Python type + """ + pass + + @abstractmethod + def to_db_value(self, ldb, value, flags): + """Converts value to database value. + + This should return a MessageElement or None, where None means + the field will be unset on the next save. 
+ + :param ldb: Ldb connection + :param value: Input value from Python field + :param flags: MessageElement flags + :returns: MessageElement or None + """ + pass + + +class IntegerField(Field): + """A simple integer field, can be an int or list of int.""" + + def from_db_value(self, ldb, value): + """Convert MessageElement to int or list of int.""" + if value is None: + return + elif len(value) > 1 or self.many: + return [int(item) for item in value] + else: + return int(value[0]) + + def to_db_value(self, ldb, value, flags): + """Convert int or list of int to MessageElement.""" + if value is None: + return + elif isinstance(value, list): + return MessageElement( + [str(item) for item in value], flags, self.name) + else: + return MessageElement(str(value), flags, self.name) + + +class BinaryField(Field): + """Similar to StringField but using bytes instead of str. + + This tends to be quite easy because a MessageElement already uses bytes. + """ + + def from_db_value(self, ldb, value): + """Convert MessageElement to bytes or list of bytes. + + The values on the MessageElement should already be bytes so the + cast to bytes() is likely not needed in from_db_value. + """ + if value is None: + return + elif len(value) > 1 or self.many: + return [bytes(item) for item in value] + else: + return bytes(value[0]) + + def to_db_value(self, ldb, value, flags): + """Convert bytes or list of bytes to MessageElement.""" + if value is None: + return + elif isinstance(value, list): + return MessageElement( + [bytes(item) for item in value], flags, self.name) + else: + return MessageElement(bytes(value), flags, self.name) + + +class StringField(Field): + """A simple string field, may contain str or list of str.""" + + def from_db_value(self, ldb, value): + """Convert MessageElement to str or list of str.""" + if value is None: + return + elif len(value) > 1 or self.many: + return [str(item) for item in value] + else: + return str(value) + + def to_db_value(self, ldb, value, flags): + """Convert str or list of str to MessageElement.""" + if value is None: + return + elif isinstance(value, list): + return MessageElement( + [str(item) for item in value], flags, self.name) + else: + return MessageElement(str(value), flags, self.name) + + +class EnumField(Field): + """A field based around Python's Enum type.""" + + def __init__(self, name, enum, many=False, default=None): + """Create a new EnumField for the given enum class.""" + self.enum = enum + super().__init__(name, many, default) + + def enum_from_value(self, value): + """Return Enum instance from value. + + Has a special case for IntEnum as the constructor only accepts int. 
+ """ + if issubclass(self.enum, IntEnum): + return self.enum(int(str(value))) + else: + return self.enum(str(value)) + + def from_db_value(self, ldb, value): + """Convert MessageElement to enum or list of enum.""" + if value is None: + return + elif len(value) > 1 or self.many: + return [self.enum_from_value(item) for item in value] + else: + return self.enum_from_value(value) + + def to_db_value(self, ldb, value, flags): + """Convert enum or list of enum to MessageElement.""" + if value is None: + return + elif isinstance(value, list): + return MessageElement( + [str(item.value) for item in value], flags, self.name) + else: + return MessageElement(str(value.value), flags, self.name) + + +class DateTimeField(Field): + """A field for parsing ldb timestamps into Python datetime.""" + + def from_db_value(self, ldb, value): + """Convert MessageElement to datetime or list of datetime.""" + if value is None: + return + elif len(value) > 1 or self.many: + return [datetime.fromtimestamp(string_to_time(str(item))) + for item in value] + else: + return datetime.fromtimestamp(string_to_time(str(value))) + + def to_db_value(self, ldb, value, flags): + """Convert datetime or list of datetime to MessageElement.""" + if value is None: + return + elif isinstance(value, list): + return MessageElement( + [timestring(int(datetime.timestamp(item))) for item in value], + flags, self.name) + else: + return MessageElement(timestring(int(datetime.timestamp(value))), + flags, self.name) + + +class RelatedField(Field): + """A field that automatically fetches the related objects. + + Use sparingly, can be a little slow. If in doubt just use DnField instead. + """ + + def __init__(self, name, model, many=False, default=None): + """Create a new RelatedField for the given model.""" + self.model = model + super().__init__(name, many, default) + + def from_db_value(self, ldb, value): + """Convert Message element to related object or list of objects. + + Note that fetching related items is not using any sort of lazy + loading so use this field sparingly. 
+ """ + if value is None: + return + elif len(value) > 1 or self.many: + return [self.model.get(ldb, dn=Dn(ldb, str(item))) for item in value] + else: + return self.model.get(ldb, dn=Dn(ldb, str(value))) + + def to_db_value(self, ldb, value, flags): + """Convert related object or list of objects to MessageElement.""" + if value is None: + return + elif isinstance(value, list): + return MessageElement( + [str(item.dn) for item in value], flags, self.name) + else: + return MessageElement(str(value.dn), flags, self.name) + + +class DnField(Field): + """A Dn field parses the current field into a Dn object.""" + + def from_db_value(self, ldb, value): + """Convert MessageElement to a Dn object or list of Dn objects.""" + if value is None: + return + elif isinstance(value, Dn): + return value + elif len(value) > 1 or self.many: + return [Dn(ldb, str(item)) for item in value] + else: + return Dn(ldb, str(value)) + + def to_db_value(self, ldb, value, flags): + """Convert Dn object or list of Dn objects into a MessageElement.""" + if value is None: + return + elif isinstance(value, list): + return MessageElement( + [str(item) for item in value], flags, self.name) + else: + return MessageElement(str(value), flags, self.name) + + +class GUIDField(Field): + """A GUID field decodes fields containing binary GUIDs.""" + + def from_db_value(self, ldb, value): + """Convert MessageElement with a GUID into a str or list of str.""" + if value is None: + return + elif len(value) > 1 or self.many: + return [str(ndr_unpack(GUID, item)) for item in value] + else: + return str(ndr_unpack(GUID, value[0])) + + def to_db_value(self, ldb, value, flags): + """Convert str with GUID into MessageElement.""" + if value is None: + return + elif isinstance(value, list): + return MessageElement( + [ndr_pack(GUID(item)) for item in value], flags, self.name) + else: + return MessageElement(ndr_pack(GUID(value)), flags, self.name) + + +class SIDField(Field): + """A SID field encodes and decodes SID data.""" + + def from_db_value(self, ldb, value): + """Convert MessageElement with a GUID into a str or list of str.""" + if value is None: + return + elif len(value) > 1 or self.many: + return [str(ndr_unpack(security.dom_sid, item)) for item in value] + else: + return str(ndr_unpack(security.dom_sid, value[0])) + + def to_db_value(self, ldb, value, flags): + """Convert str with GUID into MessageElement.""" + if value is None: + return + elif isinstance(value, list): + return MessageElement( + [ndr_pack(security.dom_sid(item)) for item in value], + flags, self.name) + else: + return MessageElement(ndr_pack(security.dom_sid(value)), + flags, self.name) + + +class SDDLField(Field): + """A SDDL field encodes and decodes SDDL data.""" + + def __init__(self, + name, + *, + many=False, + default=None, + hidden=False, + allow_device_in_sddl=True): + """Create a new SDDLField.""" + self.allow_device_in_sddl = allow_device_in_sddl + super().__init__(name, many=many, default=default, hidden=hidden) + + def from_db_value(self, ldb, value): + if value is None: + return + elif len(value) > 1 or self.many: + return [ndr_unpack(security.descriptor, item).as_sddl() + for item in value] + else: + return ndr_unpack(security.descriptor, value[0]).as_sddl() + + def to_db_value(self, ldb, value, flags): + domain_sid = security.dom_sid(ldb.get_domain_sid()) + if value is None: + return + elif isinstance(value, list): + return MessageElement([ndr_pack(security.descriptor.from_sddl( + item, + domain_sid, + allow_device_in_sddl=self.allow_device_in_sddl)) + 
for item in value], + flags, + self.name) + else: + return MessageElement( + ndr_pack(security.descriptor.from_sddl( + value, + domain_sid, + allow_device_in_sddl=self.allow_device_in_sddl)), + flags, + self.name + ) + + +class BooleanField(Field): + """A simple boolean field, can be a bool or list of bool.""" + + def from_db_value(self, ldb, value): + """Convert MessageElement into a bool or list of bool.""" + if value is None: + return + elif len(value) > 1 or self.many: + return [str(item) == "TRUE" for item in value] + else: + return str(value) == "TRUE" + + def to_db_value(self, ldb, value, flags): + """Convert bool or list of bool into a MessageElement.""" + if value is None: + return + elif isinstance(value, list): + return MessageElement( + [str(bool(item)).upper() for item in value], flags, self.name) + else: + return MessageElement(str(bool(value)).upper(), flags, self.name) + + +class PossibleClaimValuesField(Field): + """Field for parsing possible values XML for claim types. + + This field is represented by a list of dicts, one dict per item: + + [ + { + "ValueGUID": <GUID string>, + "ValueDisplayName": "Display name", + "ValueDescription": "Optional description or None for no description", + "Value": <value>, + }, + ] + + Note that the GUID needs to be created client-side when adding entries; + leaving it as None and then saving doesn't generate the GUID. + + The field itself just converts the XML to a list and vice versa; it doesn't + automatically generate GUIDs for entries, that is entirely up to the caller. + """ + + # Namespaces for PossibleValues xml parsing. + NAMESPACE = { + "xsd": "http://www.w3.org/2001/XMLSchema", + "xsi": "http://www.w3.org/2001/XMLSchema-instance", + "": "http://schemas.microsoft.com/2010/08/ActiveDirectory/PossibleValues" + } + + def from_db_value(self, ldb, value): + """Parse MessageElement with XML to list of dicts.""" + if value is not None: + root = ElementTree.fromstring(str(value)) + string_list = root.find("StringList", self.NAMESPACE) + + values = [] + for item in string_list.findall("Item", self.NAMESPACE): + values.append({ + "ValueGUID": item.find("ValueGUID", self.NAMESPACE).text, + "ValueDisplayName": item.find("ValueDisplayName", + self.NAMESPACE).text, + "ValueDescription": item.find("ValueDescription", + self.NAMESPACE).text, + "Value": item.find("Value", self.NAMESPACE).text, + }) + + return values + + def to_db_value(self, ldb, value, flags): + """Convert list of dicts back to XML as a MessageElement.""" + if value is None: + return + + # Possible values should always be a list of dict, but for consistency + # with other fields just wrap a single value into a list and continue. + if isinstance(value, list): + possible_values = value + else: + possible_values = [value] + + # No point storing XML of an empty list. + # Return None, the field will be unset on the next save. 
+ if len(possible_values) == 0: + return + + # root node + root = ElementTree.Element("PossibleClaimValues") + for name, url in self.NAMESPACE.items(): + if name == "": + root.set("xmlns", url) + else: + root.set(f"xmlns:{name}", url) + + # StringList node + string_list = ElementTree.SubElement(root, "StringList") + + # List of values + for item_dict in possible_values: + item = ElementTree.SubElement(string_list, "Item") + item_guid = ElementTree.SubElement(item, "ValueGUID") + item_guid.text = item_dict["ValueGUID"] + item_name = ElementTree.SubElement(item, "ValueDisplayName") + item_name.text = item_dict["ValueDisplayName"] + item_desc = ElementTree.SubElement(item, "ValueDescription") + item_desc.text = item_dict["ValueDescription"] + item_value = ElementTree.SubElement(item, "Value") + item_value.text = item_dict["Value"] + + # NOTE: indent was only added in Python 3.9 so can't be used yet. + # ElementTree.indent(root, space="\t", level=0) + + out = io.BytesIO() + ElementTree.ElementTree(root).write(out, + encoding="utf-16", + xml_declaration=True, + short_empty_elements=False) + + # Back to str as that is what MessageElement needs. + return MessageElement(out.getvalue().decode("utf-16"), flags, self.name) diff --git a/python/samba/netcmd/domain/models/group.py b/python/samba/netcmd/domain/models/group.py new file mode 100644 index 0000000..9473127 --- /dev/null +++ b/python/samba/netcmd/domain/models/group.py @@ -0,0 +1,42 @@ +# Unix SMB/CIFS implementation. +# +# Group model. +# +# Copyright (C) Catalyst.Net Ltd. 2023 +# +# Written by Rob van der Linde +# +# This program is free software; you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation; either version 3 of the License, or +# (at your option) any later version. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with this program. If not, see . +# + +from .fields import BooleanField, DnField, IntegerField, SIDField, StringField +from .model import Model + + +class Group(Model): + admin_count = IntegerField("adminCount") + description = StringField("description") + is_critical_system_object = BooleanField("isCriticalSystemObject", + default=False, readonly=True) + member = DnField("member", many=True) + object_sid = SIDField("objectSid") + system_flags = IntegerField("systemFlags") + + @staticmethod + def get_object_class(): + return "group" + + def get_authentication_sddl(self): + return "O:SYG:SYD:(XA;OICI;CR;;;WD;(Member_of_any {SID(%s)}))" % ( + self.object_sid) diff --git a/python/samba/netcmd/domain/models/model.py b/python/samba/netcmd/domain/models/model.py new file mode 100644 index 0000000..602c6ca --- /dev/null +++ b/python/samba/netcmd/domain/models/model.py @@ -0,0 +1,426 @@ +# Unix SMB/CIFS implementation. +# +# Model and basic ORM for the Ldb database. +# +# Copyright (C) Catalyst.Net Ltd. 2023 +# +# Written by Rob van der Linde +# +# This program is free software; you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation; either version 3 of the License, or +# (at your option) any later version. 
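Every Field above is a symmetric pair of converters, so its behaviour can be checked without a directory at all: to_db_value() builds the MessageElement and from_db_value() parses it back. A small round-trip sketch; the ldb argument is unused by these two field types, so None is passed:

    # Illustrative only: round-tripping values through two simple fields.
    from ldb import FLAG_MOD_ADD
    from samba.netcmd.domain.models.fields import BooleanField, IntegerField

    enforced = BooleanField("msDS-AuthNPolicyEnforced")
    el = enforced.to_db_value(None, True, FLAG_MOD_ADD)   # MessageElement("TRUE")
    assert enforced.from_db_value(None, el) is True

    count = IntegerField("adminCount", many=True)
    el = count.to_db_value(None, [1, 2, 3], FLAG_MOD_ADD)
    assert count.from_db_value(None, el) == [1, 2, 3]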
+# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with this program. If not, see . +# + +import inspect +from abc import ABCMeta, abstractmethod + +from ldb import ERR_NO_SUCH_OBJECT, FLAG_MOD_ADD, FLAG_MOD_REPLACE, LdbError,\ + Message, MessageElement, SCOPE_BASE, SCOPE_SUBTREE, binary_encode +from samba.sd_utils import SDUtils + +from .exceptions import DeleteError, DoesNotExist, FieldError,\ + ProtectError, UnprotectError +from .fields import DateTimeField, DnField, Field, GUIDField, IntegerField,\ + StringField +from .query import Query + +# Keeps track of registered models. +# This gets populated by the ModelMeta class. +MODELS = {} + + +class ModelMeta(ABCMeta): + + def __new__(mcls, name, bases, namespace, **kwargs): + cls = super().__new__(mcls, name, bases, namespace, **kwargs) + + if cls.__name__ != "Model": + cls.fields = dict(inspect.getmembers(cls, lambda f: isinstance(f, Field))) + cls.meta = mcls + MODELS[name] = cls + + return cls + + +class Model(metaclass=ModelMeta): + cn = StringField("cn") + distinguished_name = DnField("distinguishedName") + dn = DnField("dn") + ds_core_propagation_data = DateTimeField("dsCorePropagationData", + hidden=True) + instance_type = IntegerField("instanceType") + name = StringField("name") + object_category = DnField("objectCategory") + object_class = StringField("objectClass", + default=lambda obj: obj.get_object_class()) + object_guid = GUIDField("objectGUID") + usn_changed = IntegerField("uSNChanged", hidden=True) + usn_created = IntegerField("uSNCreated", hidden=True) + when_changed = DateTimeField("whenChanged", hidden=True) + when_created = DateTimeField("whenCreated", hidden=True) + + def __init__(self, **kwargs): + """Create a new model instance and optionally populate fields. + + Does not save the object to the database, call .save() for that. + + :param kwargs: Optional input fields to populate object with + """ + # Used by the _apply method, holds the original ldb Message, + # which is used by save() to determine what fields changed. + self._message = None + + for field_name, field in self.fields.items(): + if field_name in kwargs: + default = kwargs[field_name] + elif callable(field.default): + default = field.default(self) + else: + default = field.default + + setattr(self, field_name, default) + + def __repr__(self): + """Return object representation for this model.""" + return f"<{self.__class__.__name__}: {self}>" + + def __str__(self): + """Stringify model instance to implement in each model.""" + return str(self.cn) + + def __eq__(self, other): + """Basic object equality check only really checks if the dn matches. + + :param other: The other object to compare with + """ + if other is None: + return False + else: + return self.dn == other.dn + + def __json__(self): + """Automatically called by custom JSONEncoder class. + + When turning an object into json any fields of type RelatedField + will also end up calling this method. + """ + if self.dn is not None: + return str(self.dn) + + @staticmethod + def get_base_dn(ldb): + """Return the base DN for the container of this model. 
+ + :param ldb: Ldb connection + :return: Dn to use for new objects + """ + return ldb.get_default_basedn() + + @classmethod + def get_search_dn(cls, ldb): + """Return the DN used for querying. + + By default, this just calls get_base_dn, but it is possible to + return a different Dn for querying. + + :param ldb: Ldb connection + :return: Dn to use for searching + """ + return cls.get_base_dn(ldb) + + @staticmethod + @abstractmethod + def get_object_class(): + """Returns the objectClass for this model.""" + pass + + @classmethod + def from_message(cls, ldb, message): + """Create a new model instance from the Ldb Message object. + + :param ldb: Ldb connection + :param message: Ldb Message object to create instance from + """ + obj = cls() + obj._apply(ldb, message) + return obj + + def _apply(self, ldb, message): + """Internal method to apply Ldb Message to current object. + + :param ldb: Ldb connection + :param message: Ldb Message object to apply + """ + # Store the ldb Message so that in save we can see what changed. + self._message = message + + for attr, field in self.fields.items(): + if field.name in message: + setattr(self, attr, field.from_db_value(ldb, message[field.name])) + + def refresh(self, ldb, fields=None): + """Refresh object from database. + + :param ldb: Ldb connection + :param fields: Optional list of field names to refresh + """ + attrs = [self.fields[f].name for f in fields] if fields else None + + # This shouldn't normally happen but in case the object refresh fails. + try: + res = ldb.search(self.dn, scope=SCOPE_BASE, attrs=attrs) + except LdbError as e: + if e.args[0] == ERR_NO_SUCH_OBJECT: + raise DoesNotExist(f"Refresh failed, object gone: {self.dn}") + raise + + self._apply(ldb, res[0]) + + def as_dict(self, include_hidden=False): + """Returns a dict representation of the model. + + :param include_hidden: Include fields with hidden=True when set + :returns: dict representation of model using Ldb field names as keys + """ + obj_dict = {} + + for attr, field in self.fields.items(): + if not field.hidden or include_hidden: + value = getattr(self, attr) + if value is not None: + obj_dict[field.name] = value + + return obj_dict + + @classmethod + def build_expression(cls, **kwargs): + """Build LDAP search expression from kwargs. + + :kwargs: fields to use for expression using model field names + """ + # Take a copy, never modify the original if it can be avoided. + # Then always add the object_class to the search criteria. + criteria = dict(kwargs) + criteria["object_class"] = cls.get_object_class() + + # Build search expression. + num_fields = len(criteria) + expression = "" if num_fields == 1 else "(&" + + for field_name, value in criteria.items(): + field = cls.fields.get(field_name) + if not field: + raise ValueError(f"Unknown field '{field_name}'") + expression += f"({field.name}={binary_encode(value)})" + + if num_fields > 1: + expression += ")" + + return expression + + @classmethod + def query(cls, ldb, **kwargs): + """Returns a search query for this model. + + :param ldb: Ldb connection + :param kwargs: Search criteria as keyword args + """ + base_dn = cls.get_search_dn(ldb) + + # If the container does not exist produce a friendly error message. 
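
For orientation, build_expression produces a plain LDAP filter string from model field names, appending the objectClass criterion automatically. A simplified sketch of the expected output; this is illustrative only, since the real method maps field names through cls.fields and escapes values with ldb.binary_encode:

    # What Group.build_expression(cn="Domain Admins") should produce:
    criteria = {"cn": "Domain Admins", "objectClass": "group"}
    expression = "(&" + "".join(f"({k}={v})" for k, v in criteria.items()) + ")"
    print(expression)  # -> (&(cn=Domain Admins)(objectClass=group))
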
+ try: + result = ldb.search(base_dn, + scope=SCOPE_SUBTREE, + expression=cls.build_expression(**kwargs)) + except LdbError as e: + if e.args[0] == ERR_NO_SUCH_OBJECT: + raise DoesNotExist(f"Container does not exist: {base_dn}") + raise + + return Query(cls, ldb, result) + + @classmethod + def get(cls, ldb, **kwargs): + """Get one object, must always return one item. + + Either find object by dn=, or any combination of attributes via kwargs. + If there are more than one result, MultipleObjectsReturned is raised. + + :param ldb: Ldb connection + :param kwargs: Search criteria as keyword args + :returns: Model instance or None if not found + :raises: MultipleObjects returned if there are more than one results + """ + # If a DN is provided use that to get the object directly. + # Otherwise, build a search expression using kwargs provided. + dn = kwargs.get("dn") + + if dn: + # Handle LDAP error 32 LDAP_NO_SUCH_OBJECT, but raise for the rest. + # Return None if the User does not exist. + try: + res = ldb.search(dn, scope=SCOPE_BASE) + except LdbError as e: + if e.args[0] == ERR_NO_SUCH_OBJECT: + return None + else: + raise + + return cls.from_message(ldb, res[0]) + else: + return cls.query(ldb, **kwargs).get() + + @classmethod + def create(cls, ldb, **kwargs): + """Create object constructs object and calls save straight after. + + :param ldb: Ldb connection + :param kwargs: Fields to populate object from + :returns: object + """ + obj = cls(**kwargs) + obj.save(ldb) + return obj + + @classmethod + def get_or_create(cls, ldb, defaults=None, **kwargs): + """Retrieve object and if it doesn't exist create a new instance. + + :param ldb: Ldb connection + :param defaults: Attributes only used for create but not search + :param kwargs: Attributes used for searching existing object + :returns: (object, bool created) + """ + obj = cls.get(ldb, **kwargs) + if obj is None: + attrs = dict(kwargs) + if defaults is not None: + attrs.update(defaults) + return cls.create(ldb, **attrs), True + else: + return obj, False + + def save(self, ldb): + """Save model to Ldb database. + + The save operation will save all fields excluding fields that + return None when calling their `to_db_value` methods. + + The `to_db_value` method can either return a ldb Message object, + or None if the field is to be excluded. + + For updates, the existing object is fetched and only fields + that are changed are included in the update ldb Message. + + Also for updates, any fields that currently have a value, + but are to be set to None will be seen as a delete operation. + + After the save operation the object is refreshed from the server, + as often the server will populate some fields. + + :param ldb: Ldb connection + """ + if self.dn is None: + dn = self.get_base_dn(ldb) + dn.add_child(f"CN={self.cn or self.name}") + self.dn = dn + + message = Message(dn=self.dn) + for attr, field in self.fields.items(): + if attr != "dn" and not field.readonly: + value = getattr(self, attr) + try: + db_value = field.to_db_value(ldb, value, FLAG_MOD_ADD) + except ValueError as e: + raise FieldError(e, field=field) + + # Don't add empty fields. + if db_value is not None and len(db_value): + message.add(db_value) + + # Create object + ldb.add(message) + + # Fetching object refreshes any automatically populated fields. + res = ldb.search(dn, scope=SCOPE_BASE) + self._apply(ldb, res[0]) + else: + # Existing Message was stored to work out what fields changed. 
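
Together, query/get/create/get_or_create give the familiar ORM calling pattern. A hedged usage sketch, assuming a connected SamDB handle named samdb and assuming the models package re-exports Group from group.py:

    from samba.netcmd.domain.models import Group  # re-export is an assumption

    # Returns None when absent instead of raising.
    group = Group.get(samdb, cn="Domain Admins")

    # Idempotent creation: search first, apply defaults only on create.
    group, created = Group.get_or_create(
        samdb,
        cn="LDAP Editors",
        defaults={"description": "example group"},
    )
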
+ existing_obj = self.from_message(ldb, self._message) + + # Only modify replace or modify fields that have changed. + # Any fields that are set to None or an empty list get unset. + message = Message(dn=self.dn) + for attr, field in self.fields.items(): + if attr != "dn" and not field.readonly: + value = getattr(self, attr) + old_value = getattr(existing_obj, attr) + + if value != old_value: + try: + db_value = field.to_db_value(ldb, value, + FLAG_MOD_REPLACE) + except ValueError as e: + raise FieldError(e, field=field) + + # When a field returns None or empty list, delete attr. + if db_value in (None, []): + db_value = MessageElement([], + FLAG_MOD_REPLACE, + field.name) + message.add(db_value) + + # Saving nothing only triggers an error. + if len(message): + ldb.modify(message) + + # Fetching object refreshes any automatically populated fields. + self.refresh(ldb) + + def delete(self, ldb): + """Delete item from Ldb database. + + If self.dn is None then the object has not yet been saved. + + :param ldb: Ldb connection + """ + if self.dn is None: + raise DeleteError("Cannot delete object that doesn't have a dn.") + + try: + ldb.delete(self.dn) + except LdbError as e: + raise DeleteError(f"Delete failed: {e}") + + def protect(self, ldb): + """Protect object from accidental deletion. + + :param ldb: Ldb connection + """ + utils = SDUtils(ldb) + + try: + utils.dacl_add_ace(self.dn, "(D;;DTSD;;;WD)") + except LdbError as e: + raise ProtectError(f"Failed to protect object: {e}") + + def unprotect(self, ldb): + """Unprotect object from accidental deletion. + + :param ldb: Ldb connection + """ + utils = SDUtils(ldb) + + try: + utils.dacl_delete_aces(self.dn, "(D;;DTSD;;;WD)") + except LdbError as e: + raise UnprotectError(f"Failed to unprotect object: {e}") diff --git a/python/samba/netcmd/domain/models/query.py b/python/samba/netcmd/domain/models/query.py new file mode 100644 index 0000000..9cdb650 --- /dev/null +++ b/python/samba/netcmd/domain/models/query.py @@ -0,0 +1,81 @@ +# Unix SMB/CIFS implementation. +# +# Query class for the ORM to the Ldb database. +# +# Copyright (C) Catalyst.Net Ltd. 2023 +# +# Written by Rob van der Linde +# +# This program is free software; you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation; either version 3 of the License, or +# (at your option) any later version. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with this program. If not, see . 
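
One non-obvious idiom in the update path of save() above: clearing an attribute is expressed as a replace with an empty element. A sketch of that pattern with the ldb bindings, where samdb and the DN are illustrative placeholders:

    import ldb

    msg = ldb.Message()
    msg.dn = ldb.Dn(samdb, "CN=LDAP Editors,CN=Users,DC=example,DC=com")
    # An empty FLAG_MOD_REPLACE element removes every value of the attribute,
    # which is how save() maps a field that changed to None back to the database.
    msg["description"] = ldb.MessageElement([], ldb.FLAG_MOD_REPLACE, "description")
    samdb.modify(msg)
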
+# + +import re + +from .exceptions import DoesNotExist, MultipleObjectsReturned + +RE_SPLIT_CAMELCASE = re.compile(r"[A-Z](?:[a-z]+|[A-Z]*(?=[A-Z]|$))") + + +class Query: + """Simple Query class used by the `Model.query` method.""" + + def __init__(self, model, ldb, result): + self.model = model + self.ldb = ldb + self.result = result + self.count = result.count + self.name = " ".join(RE_SPLIT_CAMELCASE.findall(model.__name__)).lower() + + def __iter__(self): + """Loop over Query class yields Model instances.""" + for message in self.result: + yield self.model.from_message(self.ldb, message) + + def first(self): + """Returns the first item in the Query or None for no results.""" + if self.result.count: + return self.model.from_message(self.ldb, self.result[0]) + + def last(self): + """Returns the last item in the Query or None for no results.""" + if self.result.count: + return self.model.from_message(self.ldb, self.result[-1]) + + def get(self): + """Returns one item or None if no results were found. + + :returns: Model instance or None if not found. + :raises MultipleObjectsReturned: if more than one results were returned + """ + if self.count > 1: + raise MultipleObjectsReturned( + f"More than one {self.name} objects returned (got {self.count}).") + elif self.count: + return self.model.from_message(self.ldb, self.result[0]) + + def one(self): + """Must return EXACTLY one item or raise an exception. + + :returns: Model instance + :raises DoesNotExist: if no results were returned + :raises MultipleObjectsReturned: if more than one results were returned + """ + if self.count < 1: + raise DoesNotExist( + f"{self.name.capitalize()} matching query not found") + elif self.count > 1: + raise MultipleObjectsReturned( + f"More than one {self.name} objects returned (got {self.count}).") + else: + return self.model.from_message(self.ldb, self.result[0]) diff --git a/python/samba/netcmd/domain/models/schema.py b/python/samba/netcmd/domain/models/schema.py new file mode 100644 index 0000000..59ece05 --- /dev/null +++ b/python/samba/netcmd/domain/models/schema.py @@ -0,0 +1,124 @@ +# Unix SMB/CIFS implementation. +# +# Class and attribute schema models. +# +# Copyright (C) Catalyst.Net Ltd. 2023 +# +# Written by Rob van der Linde +# +# This program is free software; you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation; either version 3 of the License, or +# (at your option) any later version. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with this program. If not, see . 
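
The Query accessors differ only in how they treat zero and multiple results: first() and last() return None when empty, get() additionally raises MultipleObjectsReturned on more than one hit, and one() raises on both empty and ambiguous result sets. The camel-case regex supplies the human-readable noun used in those messages; a runnable check:

    import re

    RE_SPLIT_CAMELCASE = re.compile(r"[A-Z](?:[a-z]+|[A-Z]*(?=[A-Z]|$))")

    # The noun used in DoesNotExist/MultipleObjectsReturned messages:
    print(" ".join(RE_SPLIT_CAMELCASE.findall("AuthenticationSilo")).lower())
    # -> authentication silo
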
+# + +from .fields import BinaryField, BooleanField, DnField, GUIDField,\ + IntegerField, StringField +from .model import Model + + +class ClassSchema(Model): + default_object_category = DnField("defaultObjectCategory") + governs_id = StringField("governsID") + schema_id_guid = GUIDField("schemaIDGUID") + subclass_of = StringField("subclassOf") + admin_description = StringField("adminDescription") + admin_display_name = StringField("adminDisplayName") + default_hiding_value = BooleanField("defaultHidingValue") + default_security_descriptor = BinaryField("defaultSecurityDescriptor") + ldap_display_name = StringField("lDAPDisplayName") + may_contain = StringField("mayContain", many=True) + poss_superiors = StringField("possSuperiors", many=True) + rdn_att_id = StringField("rDNAttID") + show_in_advanced_view_only = BooleanField("showInAdvancedViewOnly") + system_only = BooleanField("systemOnly", readonly=True) + + @staticmethod + def get_base_dn(ldb): + """Return the base DN for the ClassSchema model. + + This is the same as AttributeSchema, but the objectClass is different. + + :param ldb: Ldb connection + :return: Dn object of container + """ + return ldb.get_schema_basedn() + + @staticmethod + def get_object_class(): + return "classSchema" + + @classmethod + def lookup(cls, ldb, name): + """Helper function to lookup class or raise LookupError. + + :param ldb: Ldb connection + :param name: Class name + :raises: LookupError if not found + :raises: ValueError if name is not provided + """ + if not name: + raise ValueError("Class name is required.") + + attr = cls.get(ldb, ldap_display_name=name) + if attr is None: + raise LookupError(f"Could not locate {name} in class schema.") + + return attr + + +class AttributeSchema(Model): + attribute_id = StringField("attributeID") + attribute_syntax = StringField("attributeSyntax") + is_single_valued = BooleanField("isSingleValued") + ldap_display_name = StringField("lDAPDisplayName") + om_syntax = IntegerField("oMSyntax") + admin_description = StringField("adminDescription") + admin_display_name = StringField("adminDisplayName") + attribute_security_guid = GUIDField("attributeSecurityGUID") + schema_flags_ex = IntegerField("schemaFlagsEx") + search_flags = IntegerField("searchFlags") + show_in_advanced_view_only = BooleanField("showInAdvancedViewOnly") + system_flags = IntegerField("systemFlags", readonly=True) + system_only = BooleanField("systemOnly", readonly=True) + + @staticmethod + def get_base_dn(ldb): + """Return the base DN for the AttributeSchema model. + + This is the same as ClassSchema, but the objectClass is different. + + :param ldb: Ldb connection + :return: Dn object of container + """ + return ldb.get_schema_basedn() + + @staticmethod + def get_object_class(): + return "attributeSchema" + + @classmethod + def lookup(cls, ldb, name): + """Helper function to lookup attribute or raise LookupError. + + :param ldb: Ldb connection + :param name: Attribute name + :raises: LookupError if not found + :raises: ValueError if name is not provided + """ + if not name: + raise ValueError("Attribute name is required.") + + attr = cls.get(ldb, ldap_display_name=name) + if attr is None: + raise LookupError(f"Could not locate {name} in attribute schema.") + + return attr diff --git a/python/samba/netcmd/domain/models/site.py b/python/samba/netcmd/domain/models/site.py new file mode 100644 index 0000000..44643f3 --- /dev/null +++ b/python/samba/netcmd/domain/models/site.py @@ -0,0 +1,47 @@ +# Unix SMB/CIFS implementation. +# +# Site model. 
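
Both lookup helpers above are thin guards around get() that raise LookupError with a readable message instead of returning None. A usage sketch, with samdb an assumed SamDB connection:

    from samba.netcmd.domain.models.schema import AttributeSchema, ClassSchema

    attr = AttributeSchema.lookup(samdb, "sAMAccountName")
    print(attr.attribute_id, attr.is_single_valued)

    klass = ClassSchema.lookup(samdb, "group")
    print(klass.governs_id, klass.rdn_att_id)
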
+# +# Copyright (C) Catalyst.Net Ltd. 2023 +# +# Written by Rob van der Linde +# +# This program is free software; you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation; either version 3 of the License, or +# (at your option) any later version. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with this program. If not, see . +# + +from .fields import BooleanField, DnField, IntegerField +from .model import Model + + +class Site(Model): + show_in_advanced_view_only = BooleanField("showInAdvancedViewOnly") + system_flags = IntegerField("systemFlags", readonly=True) + + # Backlinks + site_object_bl = DnField("siteObjectBL", readonly=True) + + @staticmethod + def get_base_dn(ldb): + """Return the base DN for the Site model. + + :param ldb: Ldb connection + :return: Dn to use for new objects + """ + base_dn = ldb.get_config_basedn() + base_dn.add_child("CN=Sites") + return base_dn + + @staticmethod + def get_object_class(): + return "site" diff --git a/python/samba/netcmd/domain/models/subnet.py b/python/samba/netcmd/domain/models/subnet.py new file mode 100644 index 0000000..bb249d4 --- /dev/null +++ b/python/samba/netcmd/domain/models/subnet.py @@ -0,0 +1,45 @@ +# Unix SMB/CIFS implementation. +# +# Subnet model. +# +# Copyright (C) Catalyst.Net Ltd. 2023 +# +# Written by Rob van der Linde +# +# This program is free software; you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation; either version 3 of the License, or +# (at your option) any later version. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with this program. If not, see . +# + +from .fields import BooleanField, DnField, IntegerField +from .model import Model + + +class Subnet(Model): + show_in_advanced_view_only = BooleanField("showInAdvancedViewOnly") + site_object = DnField("siteObject") + system_flags = IntegerField("systemFlags", readonly=True) + + @staticmethod + def get_base_dn(ldb): + """Return the base DN for the Subnet model. + + :param ldb: Ldb connection + :return: Dn to use for new objects + """ + base_dn = ldb.get_config_basedn() + base_dn.add_child("CN=Subnets,CN=Sites") + return base_dn + + @staticmethod + def get_object_class(): + return "subnet" diff --git a/python/samba/netcmd/domain/models/user.py b/python/samba/netcmd/domain/models/user.py new file mode 100644 index 0000000..7b0785a --- /dev/null +++ b/python/samba/netcmd/domain/models/user.py @@ -0,0 +1,75 @@ +# Unix SMB/CIFS implementation. +# +# User model. +# +# Copyright (C) Catalyst.Net Ltd. 2023 +# +# Written by Rob van der Linde +# +# This program is free software; you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation; either version 3 of the License, or +# (at your option) any later version. 
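
Site and Subnet anchor themselves in the configuration partition rather than the default naming context. A sketch of the resulting DNs and a subnet creation call; samdb and all names here are illustrative:

    # With a configuration NC of CN=Configuration,DC=example,DC=com:
    #   Site.get_base_dn(samdb)   -> CN=Sites,CN=Configuration,DC=example,DC=com
    #   Subnet.get_base_dn(samdb) -> CN=Subnets,CN=Sites,CN=Configuration,DC=example,DC=com
    site = Site.get(samdb, cn="Default-First-Site-Name")
    subnet = Subnet.create(samdb, cn="10.0.0.0/24", site_object=site.dn)
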
+# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with this program. If not, see . +# + +from ldb import Dn + +from samba.dsdb import DS_GUID_USERS_CONTAINER + +from .fields import DnField, SIDField, StringField +from .model import Model + + +class User(Model): + username = StringField("sAMAccountName") + assigned_policy = DnField("msDS-AssignedAuthNPolicy") + assigned_silo = DnField("msDS-AssignedAuthNPolicySilo") + object_sid = SIDField("objectSid") + + def __str__(self): + """Return username rather than cn for User model.""" + return self.username + + @staticmethod + def get_base_dn(ldb): + """Return the base DN for the User model. + + :param ldb: Ldb connection + :return: Dn to use for new objects + """ + return ldb.get_wellknown_dn(ldb.get_default_basedn(), + DS_GUID_USERS_CONTAINER) + + @classmethod + def get_search_dn(cls, ldb): + """Return Dn used for searching so Computers will also be found. + + :param ldb: Ldb connection + :return: Dn to use for searching + """ + return ldb.get_root_basedn() + + @staticmethod + def get_object_class(): + return "user" + + @classmethod + def find(cls, ldb, name): + """Helper function to find a user first by Dn then username. + + If the Dn can't be parsed, use sAMAccountName instead. + """ + try: + query = {"dn": Dn(ldb, name)} + except ValueError: + query = {"username": name} + + return cls.get(ldb, **query) diff --git a/python/samba/netcmd/domain/models/value_type.py b/python/samba/netcmd/domain/models/value_type.py new file mode 100644 index 0000000..00a4e07 --- /dev/null +++ b/python/samba/netcmd/domain/models/value_type.py @@ -0,0 +1,96 @@ +# Unix SMB/CIFS implementation. +# +# Claim value type model. +# +# Copyright (C) Catalyst.Net Ltd. 2023 +# +# Written by Rob van der Linde +# +# This program is free software; you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation; either version 3 of the License, or +# (at your option) any later version. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with this program. If not, see . +# + +from .fields import BooleanField, DnField, IntegerField, StringField +from .model import Model + +# LDAP Syntax to Value Type CN lookup table. +# These are the lookups used by known AD attributes, add new ones as required. 
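
User.find above accepts either form of identifier and dispatches on whether the string parses as a DN. A short illustration, assuming samdb and assuming the package re-exports User:

    from samba.netcmd.domain.models import User  # re-export is an assumption

    u1 = User.find(samdb, "CN=Alice,CN=Users,DC=example,DC=com")  # parses as a Dn
    u2 = User.find(samdb, "alice")            # falls back to a sAMAccountName search
    assert u1 == u2                           # Model.__eq__ compares the two DNs
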
+SYNTAX_TO_VALUE_TYPE_CN = { + "2.5.5.1": "MS-DS-Text", # Object(DS-DN) + "2.5.5.2": "MS-DS-Text", # String(Object-Identifier) + "2.5.5.8": "MS-DS-YesNo", # Boolean + "2.5.5.9": "MS-DS-Number", # Integer + "2.5.5.12": "MS-DS-Text", # String(Unicode) + "2.5.5.15": "MS-DS-Text", # String(NT-Sec-Desc) + "2.5.5.16": "MS-DS-Number", # LargeInteger +} + + +class ValueType(Model): + description = StringField("description") + display_name = StringField("displayName") + claim_is_single_valued = BooleanField("msDS-ClaimIsSingleValued") + claim_is_value_space_restricted = BooleanField( + "msDS-ClaimIsValueSpaceRestricted") + claim_value_type = IntegerField("msDS-ClaimValueType") + is_possible_values_present = BooleanField("msDS-IsPossibleValuesPresent") + show_in_advanced_view_only = BooleanField("showInAdvancedViewOnly") + + # Backlinks + value_type_reference_bl = DnField( + "msDS-ValueTypeReferenceBL", readonly=True) + + @staticmethod + def get_base_dn(ldb): + """Return the base DN for the ValueType model. + + :param ldb: Ldb connection + :return: Dn object of container + """ + base_dn = ldb.get_config_basedn() + base_dn.add_child("CN=Value Types,CN=Claims Configuration,CN=Services") + return base_dn + + @staticmethod + def get_object_class(): + return "msDS-ValueType" + + @classmethod + def lookup(cls, ldb, attribute): + """Helper function to get ValueType by attribute or raise LookupError. + + :param ldb: Ldb connection + :param attribute: AttributeSchema object + :raises: LookupError if not found + :raises: ValueError for unknown attribute syntax + """ + # If attribute is None. + if not attribute: + raise ValueError("Attribute is required for value type lookup.") + + # Unknown attribute syntax as it isn't in the lookup table. + syntax = attribute.attribute_syntax + cn = SYNTAX_TO_VALUE_TYPE_CN.get(syntax) + if not cn: + raise ValueError(f"Unable to process attribute syntax {syntax}") + + # This should always return something but should still be handled. + value_type = cls.get(ldb, cn=cn) + if value_type is None: + raise LookupError( + f"Could not find claim value type for {attribute}.") + + return value_type + + def __str__(self): + return str(self.display_name) diff --git a/python/samba/netcmd/domain/passwordsettings.py b/python/samba/netcmd/domain/passwordsettings.py new file mode 100644 index 0000000..d0cf47b --- /dev/null +++ b/python/samba/netcmd/domain/passwordsettings.py @@ -0,0 +1,316 @@ +# domain management - domain passwordsettings +# +# Copyright Matthias Dieter Wallnoefer 2009 +# Copyright Andrew Kroeger 2009 +# Copyright Jelmer Vernooij 2007-2012 +# Copyright Giampaolo Lauria 2011 +# Copyright Matthieu Patou 2011 +# Copyright Andrew Bartlett 2008-2015 +# Copyright Stefan Metzmacher 2012 +# +# This program is free software; you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation; either version 3 of the License, or +# (at your option) any later version. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with this program. If not, see . 
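
The syntax table maps AD attribute-syntax OIDs to the CN of a claim value-type object, which ValueType.lookup then resolves in the directory. A sketch tying the two together; samdb is an assumed connection and 'title' is just a stock String(Unicode) attribute:

    from samba.netcmd.domain.models.schema import AttributeSchema
    from samba.netcmd.domain.models.value_type import ValueType

    attr = AttributeSchema.lookup(samdb, "title")  # syntax 2.5.5.12
    vt = ValueType.lookup(samdb, attr)             # resolves the MS-DS-Text object
    print(vt.claim_value_type)
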
+# + +import ldb +import samba.getopt as options +from samba.auth import system_session +from samba.dcerpc.samr import (DOMAIN_PASSWORD_COMPLEX, + DOMAIN_PASSWORD_STORE_CLEARTEXT) +from samba.netcmd import Command, CommandError, Option, SuperCommand +from samba.netcmd.common import (NEVER_TIMESTAMP, timestamp_to_days, + timestamp_to_mins) +from samba.netcmd.pso import cmd_domain_passwordsettings_pso +from samba.samdb import SamDB + + +class cmd_domain_passwordsettings_show(Command): + """Display current password settings for the domain.""" + + synopsis = "%prog [options]" + + takes_optiongroups = { + "sambaopts": options.SambaOptions, + "versionopts": options.VersionOptions, + "credopts": options.CredentialsOptions, + } + + takes_options = [ + Option("-H", "--URL", help="LDB URL for database or target server", type=str, + metavar="URL", dest="H"), + ] + + def run(self, H=None, credopts=None, sambaopts=None, versionopts=None): + lp = sambaopts.get_loadparm() + creds = credopts.get_credentials(lp) + + samdb = SamDB(url=H, session_info=system_session(), + credentials=creds, lp=lp) + + domain_dn = samdb.domain_dn() + res = samdb.search(domain_dn, scope=ldb.SCOPE_BASE, + attrs=["pwdProperties", "pwdHistoryLength", "minPwdLength", + "minPwdAge", "maxPwdAge", "lockoutDuration", "lockoutThreshold", + "lockOutObservationWindow"]) + assert(len(res) == 1) + try: + pwd_props = int(res[0]["pwdProperties"][0]) + pwd_hist_len = int(res[0]["pwdHistoryLength"][0]) + cur_min_pwd_len = int(res[0]["minPwdLength"][0]) + # ticks -> days + cur_min_pwd_age = timestamp_to_days(res[0]["minPwdAge"][0]) + cur_max_pwd_age = timestamp_to_days(res[0]["maxPwdAge"][0]) + + cur_account_lockout_threshold = int(res[0]["lockoutThreshold"][0]) + + # ticks -> mins + cur_account_lockout_duration = timestamp_to_mins(res[0]["lockoutDuration"][0]) + cur_reset_account_lockout_after = timestamp_to_mins(res[0]["lockOutObservationWindow"][0]) + except Exception as e: + raise CommandError("Could not retrieve password properties!", e) + + self.message("Password information for domain '%s'" % domain_dn) + self.message("") + if pwd_props & DOMAIN_PASSWORD_COMPLEX != 0: + self.message("Password complexity: on") + else: + self.message("Password complexity: off") + if pwd_props & DOMAIN_PASSWORD_STORE_CLEARTEXT != 0: + self.message("Store plaintext passwords: on") + else: + self.message("Store plaintext passwords: off") + self.message("Password history length: %d" % pwd_hist_len) + self.message("Minimum password length: %d" % cur_min_pwd_len) + self.message("Minimum password age (days): %d" % cur_min_pwd_age) + self.message("Maximum password age (days): %d" % cur_max_pwd_age) + self.message("Account lockout duration (mins): %d" % cur_account_lockout_duration) + self.message("Account lockout threshold (attempts): %d" % cur_account_lockout_threshold) + self.message("Reset account lockout after (mins): %d" % cur_reset_account_lockout_after) + + +class cmd_domain_passwordsettings_set(Command): + """Set password settings. + + Password complexity, password lockout policy, history length, + minimum password length, and the minimum and maximum password age on + a Samba AD DC server. + + Using this against a Windows DC is possible, but group policy will override it.
+ """ + + synopsis = "%prog [options]" + + takes_optiongroups = { + "sambaopts": options.SambaOptions, + "versionopts": options.VersionOptions, + "credopts": options.CredentialsOptions, + } + + takes_options = [ + Option("-H", "--URL", help="LDB URL for database or target server", type=str, + metavar="URL", dest="H"), + Option("-q", "--quiet", help="Be quiet", action="store_true"), # unused + Option("--complexity", type="choice", choices=["on", "off", "default"], + help="The password complexity (on | off | default). Default is 'on'"), + Option("--store-plaintext", type="choice", choices=["on", "off", "default"], + help="Store plaintext passwords where accounts have 'store passwords with reversible encryption' set (on | off | default). Default is 'off'"), + Option("--history-length", + help="The password history length (<integer> | default). Default is 24.", type=str), + Option("--min-pwd-length", + help="The minimum password length (<integer> | default). Default is 7.", type=str), + Option("--min-pwd-age", + help="The minimum password age (<integer in days> | default). Default is 1.", type=str), + Option("--max-pwd-age", + help="The maximum password age (<integer in days> | default). Default is 43.", type=str), + Option("--account-lockout-duration", + help="The length of time an account is locked out after exceeding the limit on bad password attempts (<integer in mins> | default). Default is 30 mins.", type=str), + Option("--account-lockout-threshold", + help="The number of bad password attempts allowed before locking out the account (<integer> | default). Default is 0 (never lock out).", type=str), + Option("--reset-account-lockout-after", + help="After this time is elapsed, the recorded number of attempts restarts from zero (<integer in mins> | default). Default is 30.", type=str), + ] + + def run(self, H=None, min_pwd_age=None, max_pwd_age=None, + quiet=False, complexity=None, store_plaintext=None, history_length=None, + min_pwd_length=None, account_lockout_duration=None, account_lockout_threshold=None, + reset_account_lockout_after=None, credopts=None, sambaopts=None, + versionopts=None): + lp = sambaopts.get_loadparm() + creds = credopts.get_credentials(lp) + + samdb = SamDB(url=H, session_info=system_session(), + credentials=creds, lp=lp) + + domain_dn = samdb.domain_dn() + msgs = [] + m = ldb.Message() + m.dn = ldb.Dn(samdb, domain_dn) + pwd_props = int(samdb.get_pwdProperties()) + + # get the current password age settings + max_pwd_age_ticks = samdb.get_maxPwdAge() + min_pwd_age_ticks = samdb.get_minPwdAge() + + if complexity is not None: + if complexity == "on" or complexity == "default": + pwd_props = pwd_props | DOMAIN_PASSWORD_COMPLEX + msgs.append("Password complexity activated!") + elif complexity == "off": + pwd_props = pwd_props & (~DOMAIN_PASSWORD_COMPLEX) + msgs.append("Password complexity deactivated!") + + if store_plaintext is not None: + if store_plaintext == "on" or store_plaintext == "default": + pwd_props = pwd_props | DOMAIN_PASSWORD_STORE_CLEARTEXT + msgs.append("Plaintext password storage for changed passwords activated!") + elif store_plaintext == "off": + pwd_props = pwd_props & (~DOMAIN_PASSWORD_STORE_CLEARTEXT) + msgs.append("Plaintext password storage for changed passwords deactivated!") + + if complexity is not None or store_plaintext is not None: + m["pwdProperties"] = ldb.MessageElement(str(pwd_props), + ldb.FLAG_MOD_REPLACE, "pwdProperties") + + if history_length is not None: + if history_length == "default": + pwd_hist_len = 24 + else: + pwd_hist_len = int(history_length) + + if pwd_hist_len < 0 or pwd_hist_len > 24: + raise
CommandError("Password history length must be in the range of 0 to 24!") + + m["pwdHistoryLength"] = ldb.MessageElement(str(pwd_hist_len), + ldb.FLAG_MOD_REPLACE, "pwdHistoryLength") + msgs.append("Password history length changed!") + + if min_pwd_length is not None: + if min_pwd_length == "default": + min_pwd_len = 7 + else: + min_pwd_len = int(min_pwd_length) + + if min_pwd_len < 0 or min_pwd_len > 14: + raise CommandError("Minimum password length must be in the range of 0 to 14!") + + m["minPwdLength"] = ldb.MessageElement(str(min_pwd_len), + ldb.FLAG_MOD_REPLACE, "minPwdLength") + msgs.append("Minimum password length changed!") + + if min_pwd_age is not None: + if min_pwd_age == "default": + min_pwd_age = 1 + else: + min_pwd_age = int(min_pwd_age) + + if min_pwd_age < 0 or min_pwd_age > 998: + raise CommandError("Minimum password age must be in the range of 0 to 998!") + + # days -> ticks + min_pwd_age_ticks = -int(min_pwd_age * (24 * 60 * 60 * 1e7)) + + m["minPwdAge"] = ldb.MessageElement(str(min_pwd_age_ticks), + ldb.FLAG_MOD_REPLACE, "minPwdAge") + msgs.append("Minimum password age changed!") + + if max_pwd_age is not None: + if max_pwd_age == "default": + max_pwd_age = 43 + else: + max_pwd_age = int(max_pwd_age) + + if max_pwd_age < 0 or max_pwd_age > 999: + raise CommandError("Maximum password age must be in the range of 0 to 999!") + + # days -> ticks + if max_pwd_age == 0: + max_pwd_age_ticks = NEVER_TIMESTAMP + else: + max_pwd_age_ticks = -int(max_pwd_age * (24 * 60 * 60 * 1e7)) + + m["maxPwdAge"] = ldb.MessageElement(str(max_pwd_age_ticks), + ldb.FLAG_MOD_REPLACE, "maxPwdAge") + msgs.append("Maximum password age changed!") + + if account_lockout_duration is not None: + if account_lockout_duration == "default": + account_lockout_duration = 30 + else: + account_lockout_duration = int(account_lockout_duration) + + if account_lockout_duration < 0 or account_lockout_duration > 99999: + raise CommandError("Account lockout duration " + "must be in the range of 0 to 99999!") + + # minutes -> ticks + if account_lockout_duration == 0: + account_lockout_duration_ticks = NEVER_TIMESTAMP + else: + account_lockout_duration_ticks = -int(account_lockout_duration * (60 * 1e7)) + + m["lockoutDuration"] = ldb.MessageElement(str(account_lockout_duration_ticks), + ldb.FLAG_MOD_REPLACE, "lockoutDuration") + msgs.append("Account lockout duration changed!") + + if account_lockout_threshold is not None: + if account_lockout_threshold == "default": + account_lockout_threshold = 0 + else: + account_lockout_threshold = int(account_lockout_threshold) + + m["lockoutThreshold"] = ldb.MessageElement(str(account_lockout_threshold), + ldb.FLAG_MOD_REPLACE, "lockoutThreshold") + msgs.append("Account lockout threshold changed!") + + if reset_account_lockout_after is not None: + if reset_account_lockout_after == "default": + reset_account_lockout_after = 30 + else: + reset_account_lockout_after = int(reset_account_lockout_after) + + if reset_account_lockout_after < 0 or reset_account_lockout_after > 99999: + raise CommandError("Reset account lockout after must be in the range of 0 to 99999!") + + # minutes -> ticks + if reset_account_lockout_after == 0: + reset_account_lockout_after_ticks = NEVER_TIMESTAMP + else: + reset_account_lockout_after_ticks = -int(reset_account_lockout_after * (60 * 1e7)) + + m["lockOutObservationWindow"] = ldb.MessageElement(str(reset_account_lockout_after_ticks), + ldb.FLAG_MOD_REPLACE, "lockOutObservationWindow") + msgs.append("Duration to reset account lockout after changed!") + + if
max_pwd_age or min_pwd_age: + # If we're setting either min or max password, make sure the max is + # still greater overall. As either setting could be None, we use the + # ticks here (which are always set) and work backwards. + max_pwd_age = timestamp_to_days(max_pwd_age_ticks) + min_pwd_age = timestamp_to_days(min_pwd_age_ticks) + if max_pwd_age != 0 and min_pwd_age >= max_pwd_age: + raise CommandError("Maximum password age (%d) must be greater than minimum password age (%d)!" % (max_pwd_age, min_pwd_age)) + + if len(m) == 0: + raise CommandError("You must specify at least one option to set. Try --help") + samdb.modify(m) + msgs.append("All changes applied successfully!") + self.message("\n".join(msgs)) + + +class cmd_domain_passwordsettings(SuperCommand): + """Manage password policy settings.""" + + subcommands = {} + subcommands["pso"] = cmd_domain_passwordsettings_pso() + subcommands["show"] = cmd_domain_passwordsettings_show() + subcommands["set"] = cmd_domain_passwordsettings_set() diff --git a/python/samba/netcmd/domain/provision.py b/python/samba/netcmd/domain/provision.py new file mode 100644 index 0000000..8f13e54 --- /dev/null +++ b/python/samba/netcmd/domain/provision.py @@ -0,0 +1,405 @@ +# domain management - domain provision +# +# Copyright Matthias Dieter Wallnoefer 2009 +# Copyright Andrew Kroeger 2009 +# Copyright Jelmer Vernooij 2007-2012 +# Copyright Giampaolo Lauria 2011 +# Copyright Matthieu Patou 2011 +# Copyright Andrew Bartlett 2008-2015 +# Copyright Stefan Metzmacher 2012 +# +# This program is free software; you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation; either version 3 of the License, or +# (at your option) any later version. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with this program. If not, see . 
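
The age and lockout arithmetic in the set command converts days and minutes into negative intervals of 100-nanosecond ticks, the unit AD uses for minPwdAge, maxPwdAge and lockoutDuration. A standalone check of that conversion; the helper names here are mine, not samba's:

    def days_to_ticks(days):
        # One day in 100 ns units, negated: AD stores ages as negative intervals.
        return -int(days * (24 * 60 * 60 * 1e7))

    def mins_to_ticks(mins):
        return -int(mins * (60 * 1e7))

    print(days_to_ticks(43))  # -37152000000000, the default maxPwdAge
    print(mins_to_ticks(30))  # -18000000000, the default lockoutDuration
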
+# + +import os +import sys +import tempfile + +import samba +import samba.getopt as options +from samba.auth import system_session +from samba.auth_util import system_session_unix +from samba.dcerpc import security +from samba.dsdb import ( + DS_DOMAIN_FUNCTION_2000, + DS_DOMAIN_FUNCTION_2003, + DS_DOMAIN_FUNCTION_2008, + DS_DOMAIN_FUNCTION_2008_R2, + DS_DOMAIN_FUNCTION_2012, + DS_DOMAIN_FUNCTION_2012_R2, + DS_DOMAIN_FUNCTION_2016 +) +from samba.netcmd import Command, CommandError, Option +from samba.provision import DEFAULT_MIN_PWD_LENGTH, ProvisioningError, provision +from samba.provision.common import FILL_DRS, FILL_FULL, FILL_NT4SYNC +from samba.samdb import get_default_backend_store +from samba import functional_level + +from .common import common_ntvfs_options, common_provision_join_options + + +class cmd_domain_provision(Command): + """Provision a domain.""" + + synopsis = "%prog [options]" + + takes_optiongroups = { + "sambaopts": options.SambaOptions, + "versionopts": options.VersionOptions, + } + + takes_options = [ + Option("--interactive", help="Ask for names", action="store_true"), + Option("--domain", type="string", metavar="DOMAIN", + help="NetBIOS domain name to use"), + Option("--domain-guid", type="string", metavar="GUID", + help="set domainguid (otherwise random)"), + Option("--domain-sid", type="string", metavar="SID", + help="set domainsid (otherwise random)"), + Option("--ntds-guid", type="string", metavar="GUID", + help="set NTDS object GUID (otherwise random)"), + Option("--invocationid", type="string", metavar="GUID", + help="set invocationid (otherwise random)"), + Option("--host-name", type="string", metavar="HOSTNAME", + help="set hostname"), + Option("--host-ip", type="string", metavar="IPADDRESS", + help="set IPv4 ipaddress"), + Option("--host-ip6", type="string", metavar="IP6ADDRESS", + help="set IPv6 ipaddress"), + Option("--site", type="string", metavar="SITENAME", + help="set site name"), + Option("--adminpass", type="string", metavar="PASSWORD", + help="choose admin password (otherwise random)"), + Option("--krbtgtpass", type="string", metavar="PASSWORD", + help="choose krbtgt password (otherwise random)"), + Option("--dns-backend", type="choice", metavar="NAMESERVER-BACKEND", + choices=["SAMBA_INTERNAL", "BIND9_FLATFILE", "BIND9_DLZ", "NONE"], + help="The DNS server backend. SAMBA_INTERNAL is the builtin name server (default), " + "BIND9_FLATFILE uses bind9 text database to store zone information, " + "BIND9_DLZ uses samba4 AD to store zone information, " + "NONE skips the DNS setup entirely (not recommended)", + default="SAMBA_INTERNAL"), + Option("--dnspass", type="string", metavar="PASSWORD", + help="choose dns password (otherwise random)"), + Option("--root", type="string", metavar="USERNAME", + help="choose 'root' unix username"), + Option("--nobody", type="string", metavar="USERNAME", + help="choose 'nobody' user"), + Option("--users", type="string", metavar="GROUPNAME", + help="choose 'users' group"), + Option("--blank", action="store_true", + help="do not add users or groups, just the structure"), + Option("--server-role", type="choice", metavar="ROLE", + choices=["domain controller", "dc", "member server", "member", "standalone"], + help="The server role (domain controller | dc | member server | member | standalone). 
Default is dc.", + default="domain controller"), + Option("--function-level", type="choice", metavar="FOR-FUN-LEVEL", + choices=["2000", "2003", "2008", "2008_R2", "2016"], + help="The domain and forest function level (2000 | 2003 | 2008 | 2008_R2 - always native | 2016). Default is (Windows) 2008_R2 Native.", + default="2008_R2"), + Option("--base-schema", type="choice", metavar="BASE-SCHEMA", + choices=["2008_R2", "2008_R2_old", "2012", "2012_R2", "2016", "2019"], + help="The base schema files to use. Default is (Windows) 2019.", + default="2019"), + Option("--adprep-level", type="choice", metavar="FUNCTION_LEVEL", + choices=["SKIP", "2008_R2", "2012", "2012_R2", "2016"], + help="The highest functional level to prepare for. Default is based on --base-schema", + default=None), + Option("--next-rid", type="int", metavar="NEXTRID", default=1000, + help="The initial nextRid value (only needed for upgrades). Default is 1000."), + Option("--partitions-only", + help="Configure Samba's partitions, but do not modify them (ie, join a BDC)", action="store_true"), + Option("--use-rfc2307", action="store_true", help="Use AD to store posix attributes (default = no)"), + ] + + ntvfs_options = [ + Option("--use-xattrs", type="choice", choices=["yes", "no", "auto"], + metavar="[yes|no|auto]", + help="Define if we should use the native fs capabilities or a tdb file for " + "storing attributes likes ntacl when --use-ntvfs is set. " + "auto tries to make an intelligent guess based on the user rights and system capabilities", + default="auto") + ] + + takes_options.extend(common_provision_join_options) + + if samba.is_ntvfs_fileserver_built(): + takes_options.extend(common_ntvfs_options) + takes_options.extend(ntvfs_options) + + takes_args = [] + + def run(self, sambaopts=None, versionopts=None, + interactive=None, + domain=None, + domain_guid=None, + domain_sid=None, + ntds_guid=None, + invocationid=None, + host_name=None, + host_ip=None, + host_ip6=None, + adminpass=None, + site=None, + krbtgtpass=None, + machinepass=None, + dns_backend=None, + dns_forwarder=None, + dnspass=None, + ldapadminpass=None, + root=None, + nobody=None, + users=None, + quiet=None, + blank=None, + server_role=None, + function_level=None, + adprep_level=None, + next_rid=None, + partitions_only=None, + targetdir=None, + use_xattrs="auto", + use_ntvfs=False, + use_rfc2307=None, + base_schema=None, + plaintext_secrets=False, + backend_store=None, + backend_store_size=None): + + self.logger = self.get_logger(name="provision", quiet=quiet) + + lp = sambaopts.get_loadparm() + smbconf = lp.configfile + + if dns_forwarder is not None: + suggested_forwarder = dns_forwarder + else: + suggested_forwarder = self._get_nameserver_ip() + if suggested_forwarder is None: + suggested_forwarder = "none" + + if not self.raw_argv: + interactive = True + + if interactive: + from getpass import getpass + import socket + + def ask(prompt, default=None): + if default is not None: + print("%s [%s]: " % (prompt, default), end=' ') + else: + print("%s: " % (prompt,), end=' ') + sys.stdout.flush() + return sys.stdin.readline().rstrip("\n") or default + + try: + default = socket.getfqdn().split(".", 1)[1].upper() + except IndexError: + default = None + realm = ask("Realm", default) + if realm in (None, ""): + raise CommandError("No realm set!") + + try: + default = realm.split(".")[0] + except IndexError: + default = None + domain = ask("Domain", default) + if domain is None: + raise CommandError("No domain set!") + + server_role = ask("Server Role (dc, member, 
standalone)", "dc") + + dns_backend = ask("DNS backend (SAMBA_INTERNAL, BIND9_FLATFILE, BIND9_DLZ, NONE)", "SAMBA_INTERNAL") + if dns_backend in (None, ''): + raise CommandError("No DNS backend set!") + + if dns_backend == "SAMBA_INTERNAL": + dns_forwarder = ask("DNS forwarder IP address (write 'none' to disable forwarding)", suggested_forwarder) + if dns_forwarder.lower() in (None, 'none'): + suggested_forwarder = None + dns_forwarder = None + + while True: + adminpassplain = getpass("Administrator password: ") + issue = self._adminpass_issue(adminpassplain) + if issue: + self.errf.write("%s.\n" % issue) + else: + adminpassverify = getpass("Retype password: ") + if not adminpassplain == adminpassverify: + self.errf.write("Sorry, passwords do not match.\n") + else: + adminpass = adminpassplain + break + + else: + realm = sambaopts._lp.get('realm') + if realm is None: + raise CommandError("No realm set!") + if domain is None: + raise CommandError("No domain set!") + + if adminpass: + issue = self._adminpass_issue(adminpass) + if issue: + raise CommandError(issue) + else: + self.logger.info("Administrator password will be set randomly!") + + try: + dom_for_fun_level = functional_level.string_to_level(function_level) + except KeyError: + raise CommandError(f"'{function_level}' is not a valid domain level") + + if adprep_level is None: + # Select the adprep_level default based + # on what the base schema permits + if base_schema in ["2008_R2", "2008_R2_old"]: + # without explicit --adprep-level=2008_R2 + # we will skip the adprep step on + # provision + adprep_level = "SKIP" + elif base_schema in ["2012"]: + adprep_level = "2012" + elif base_schema in ["2012_R2"]: + adprep_level = "2012_R2" + else: + adprep_level = "2016" + + if adprep_level == "SKIP": + provision_adprep_level = None + elif adprep_level == "2008_R2": + provision_adprep_level = DS_DOMAIN_FUNCTION_2008_R2 + elif adprep_level == "2012": + provision_adprep_level = DS_DOMAIN_FUNCTION_2012 + elif adprep_level == "2012_R2": + provision_adprep_level = DS_DOMAIN_FUNCTION_2012_R2 + elif adprep_level == "2016": + provision_adprep_level = DS_DOMAIN_FUNCTION_2016 + + if dns_backend == "SAMBA_INTERNAL" and dns_forwarder is None: + dns_forwarder = suggested_forwarder + + samdb_fill = FILL_FULL + if blank: + samdb_fill = FILL_NT4SYNC + elif partitions_only: + samdb_fill = FILL_DRS + + if targetdir is not None: + if not os.path.isdir(targetdir): + os.makedirs(targetdir) + + eadb = True + + if use_xattrs == "yes": + eadb = False + elif use_xattrs == "auto" and not use_ntvfs: + eadb = False + elif not use_ntvfs: + raise CommandError("--use-xattrs=no requires --use-ntvfs (not supported for production use). " + "Please re-run with --use-xattrs omitted.") + elif use_xattrs == "auto" and not lp.get("posix:eadb"): + if targetdir: + file = tempfile.NamedTemporaryFile(dir=os.path.abspath(targetdir)) + else: + file = tempfile.NamedTemporaryFile(dir=os.path.abspath(os.path.dirname(lp.get("private dir")))) + try: + try: + samba.ntacls.setntacl(lp, file.name, + "O:S-1-5-32G:S-1-5-32", + "S-1-5-32", + system_session_unix(), + "native") + eadb = False + except Exception: + self.logger.info("You are not root or your system does not support xattr, using tdb backend for attributes. ") + finally: + file.close() + + if eadb: + self.logger.info("not using extended attributes to store ACLs and other metadata. 
If you intend to use this provision in production, rerun the script as root on a system supporting xattrs.") + + if domain_sid is not None: + domain_sid = security.dom_sid(domain_sid) + + session = system_session() + if backend_store is None: + backend_store = get_default_backend_store() + try: + result = provision(self.logger, + session, smbconf=smbconf, targetdir=targetdir, + samdb_fill=samdb_fill, realm=realm, domain=domain, + domainguid=domain_guid, domainsid=domain_sid, + hostname=host_name, + hostip=host_ip, hostip6=host_ip6, + sitename=site, ntdsguid=ntds_guid, + invocationid=invocationid, adminpass=adminpass, + krbtgtpass=krbtgtpass, machinepass=machinepass, + dns_backend=dns_backend, dns_forwarder=dns_forwarder, + dnspass=dnspass, root=root, nobody=nobody, + users=users, + serverrole=server_role, dom_for_fun_level=dom_for_fun_level, + useeadb=eadb, next_rid=next_rid, lp=lp, use_ntvfs=use_ntvfs, + use_rfc2307=use_rfc2307, skip_sysvolacl=False, + base_schema=base_schema, + adprep_level=provision_adprep_level, + plaintext_secrets=plaintext_secrets, + backend_store=backend_store, + backend_store_size=backend_store_size) + + except ProvisioningError as e: + raise CommandError("Provision failed", e) + + result.report_logger(self.logger) + + def _get_nameserver_ip(self): + """Grab the nameserver IP address from /etc/resolv.conf.""" + from os import path + RESOLV_CONF = "/etc/resolv.conf" + + if not path.isfile(RESOLV_CONF): + self.logger.warning("Failed to locate %s" % RESOLV_CONF) + return None + + handle = None + try: + handle = open(RESOLV_CONF, 'r') + for line in handle: + if not line.startswith('nameserver'): + continue + # we want the last non-space continuous string of the line + return line.strip().split()[-1] + finally: + if handle is not None: + handle.close() + + self.logger.warning("No nameserver found in %s" % RESOLV_CONF) + + def _adminpass_issue(self, adminpass): + """Returns error string for a bad administrator password, + or None if acceptable""" + if isinstance(adminpass, bytes): + adminpass = adminpass.decode('utf8') + if len(adminpass) < DEFAULT_MIN_PWD_LENGTH: + return "Administrator password does not meet the default minimum" \ + " password length requirement (%d characters)" \ + % DEFAULT_MIN_PWD_LENGTH + elif not samba.check_password_quality(adminpass): + return "Administrator password does not meet the default" \ + " quality standards" + else: + return None diff --git a/python/samba/netcmd/domain/samba3upgrade.py b/python/samba/netcmd/domain/samba3upgrade.py new file mode 100644 index 0000000..67f4b42 --- /dev/null +++ b/python/samba/netcmd/domain/samba3upgrade.py @@ -0,0 +1,34 @@ +# domain management - domain samba3upgrade +# +# Copyright Matthias Dieter Wallnoefer 2009 +# Copyright Andrew Kroeger 2009 +# Copyright Jelmer Vernooij 2007-2012 +# Copyright Giampaolo Lauria 2011 +# Copyright Matthieu Patou 2011 +# Copyright Andrew Bartlett 2008-2015 +# Copyright Stefan Metzmacher 2012 +# +# This program is free software; you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation; either version 3 of the License, or +# (at your option) any later version. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. 
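
_get_nameserver_ip above scans /etc/resolv.conf for the first nameserver line and takes its last token. The same logic, extracted into a self-contained helper that can be pointed at any file for testing:

    def first_nameserver(path="/etc/resolv.conf"):
        """Return the address on the first 'nameserver' line, or None."""
        try:
            with open(path) as handle:
                for line in handle:
                    if line.startswith("nameserver"):
                        # the address is the last whitespace-separated token
                        return line.strip().split()[-1]
        except OSError:
            return None

    print(first_nameserver())
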
+# +# You should have received a copy of the GNU General Public License +# along with this program. If not, see . +# + +from .classicupgrade import cmd_domain_classicupgrade + + +class cmd_domain_samba3upgrade(cmd_domain_classicupgrade): + __doc__ = cmd_domain_classicupgrade.__doc__ + + # This command is present for backwards compatibility only, + # and should not be shown. + + hidden = True diff --git a/python/samba/netcmd/domain/schemaupgrade.py b/python/samba/netcmd/domain/schemaupgrade.py new file mode 100644 index 0000000..ff00a77 --- /dev/null +++ b/python/samba/netcmd/domain/schemaupgrade.py @@ -0,0 +1,350 @@ +# domain management - domain schemaupgrade +# +# Copyright Matthias Dieter Wallnoefer 2009 +# Copyright Andrew Kroeger 2009 +# Copyright Jelmer Vernooij 2007-2012 +# Copyright Giampaolo Lauria 2011 +# Copyright Matthieu Patou 2011 +# Copyright Andrew Bartlett 2008-2015 +# Copyright Stefan Metzmacher 2012 +# +# This program is free software; you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation; either version 3 of the License, or +# (at your option) any later version. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with this program. If not, see . +# + +import os +import shutil +import subprocess +import tempfile + +import ldb +import samba.getopt as options +from samba.auth import system_session +from samba.netcmd import Command, CommandError, Option +from samba.netcmd.fsmo import get_fsmo_roleowner +from samba.provision import setup_path +from samba.samdb import SamDB + + +class ldif_schema_update: + """Helper class for applying LDIF schema updates""" + + def __init__(self): + self.is_defunct = False + self.unknown_oid = None + self.dn = None + self.ldif = "" + + def can_ignore_failure(self, error): + """Checks if we can safely ignore failure to apply an LDIF update""" + (num, errstr) = error.args + + # Microsoft has marked objects as defunct that Samba doesn't know about + if num == ldb.ERR_NO_SUCH_OBJECT and self.is_defunct: + print("Defunct object %s doesn't exist, skipping" % self.dn) + return True + elif self.unknown_oid is not None: + print("Skipping unknown OID %s for object %s" % (self.unknown_oid, self.dn)) + return True + + return False + + def apply(self, samdb): + """Applies a single LDIF update to the schema""" + + try: + try: + samdb.modify_ldif(self.ldif, controls=['relax:0']) + except ldb.LdbError as e: + if e.args[0] == ldb.ERR_INVALID_ATTRIBUTE_SYNTAX: + + # REFRESH after a failed change + + # Otherwise the OID-to-attribute mapping in + # _apply_updates_in_file() won't work, because it + # can't lookup the new OID in the schema + samdb.set_schema_update_now() + + samdb.modify_ldif(self.ldif, controls=['relax:0']) + else: + raise + except ldb.LdbError as e: + if self.can_ignore_failure(e): + return 0 + else: + print("Exception: %s" % e) + print("Encountered while trying to apply the following LDIF") + print("----------------------------------------------------") + print("%s" % self.ldif) + + raise + + return 1 + + +class cmd_domain_schema_upgrade(Command): + """Domain schema upgrading""" + + synopsis = "%prog [options]" + + takes_optiongroups = { + "sambaopts": options.SambaOptions, + 
"versionopts": options.VersionOptions, + "credopts": options.CredentialsOptions, + } + + takes_options = [ + Option("-H", "--URL", help="LDB URL for database or target server", type=str, + metavar="URL", dest="H"), + Option("-q", "--quiet", help="Be quiet", action="store_true"), # unused + Option("-v", "--verbose", help="Be verbose", action="store_true"), + Option("--schema", type="choice", metavar="SCHEMA", + choices=["2012", "2012_R2", "2016", "2019"], + help="The schema file to upgrade to. Default is (Windows) 2019.", + default="2019"), + Option("--ldf-file", type=str, default=None, + help="Just apply the schema updates in the adprep/.LDF file(s) specified"), + Option("--base-dir", type=str, default=None, + help="Location of ldf files Default is ${SETUPDIR}/adprep.") + ] + + def _apply_updates_in_file(self, samdb, ldif_file): + """ + Applies a series of updates specified in an .LDIF file. The .LDIF file + is based on the adprep Schema updates provided by Microsoft. + """ + count = 0 + ldif_op = ldif_schema_update() + + # parse the file line by line and work out each update operation to apply + for line in ldif_file: + + line = line.rstrip() + + # the operations in the .LDIF file are separated by blank lines. If + # we hit a blank line, try to apply the update we've parsed so far + if line == '': + + # keep going if we haven't parsed anything yet + if ldif_op.ldif == '': + continue + + # Apply the individual change + count += ldif_op.apply(samdb) + + # start storing the next operation from scratch again + ldif_op = ldif_schema_update() + continue + + # replace the placeholder domain name in the .ldif file with the real domain + if line.upper().endswith('DC=X'): + line = line[:-len('DC=X')] + str(samdb.get_default_basedn()) + elif line.upper().endswith('CN=X'): + line = line[:-len('CN=X')] + str(samdb.get_default_basedn()) + + values = line.split(':') + + if values[0].lower() == 'dn': + ldif_op.dn = values[1].strip() + + # replace the Windows-specific operation with the Samba one + if values[0].lower() == 'changetype': + line = line.lower().replace(': ntdsschemaadd', + ': add') + line = line.lower().replace(': ntdsschemamodify', + ': modify') + line = line.lower().replace(': ntdsschemamodrdn', + ': modrdn') + line = line.lower().replace(': ntdsschemadelete', + ': delete') + + if values[0].lower() in ['rdnattid', 'subclassof', + 'systemposssuperiors', + 'systemmaycontain', + 'systemauxiliaryclass']: + _, value = values + + # The Microsoft updates contain some OIDs we don't recognize. + # Query the DB to see if we can work out the OID this update is + # referring to. If we find a match, then replace the OID with + # the ldapDisplayname + if '.' 
in value: + res = samdb.search(base=samdb.get_schema_basedn(), + expression="(|(attributeId=%s)(governsId=%s))" % + (value, value), + attrs=['ldapDisplayName']) + + if len(res) != 1: + ldif_op.unknown_oid = value + else: + display_name = str(res[0]['ldapDisplayName'][0]) + line = line.replace(value, ' ' + display_name) + + # Microsoft has marked objects as defunct that Samba doesn't know about + if values[0].lower() == 'isdefunct' and values[1].strip().lower() == 'true': + ldif_op.is_defunct = True + + # Samba has added the showInAdvancedViewOnly attribute to all objects, + # so rather than doing an add, we need to do a replace + if values[0].lower() == 'add' and values[1].strip().lower() == 'showinadvancedviewonly': + line = 'replace: showInAdvancedViewOnly' + + # Add the line to the current LDIF operation (including the newline + # we stripped off at the start of the loop) + ldif_op.ldif += line + '\n' + + return count + + def _apply_update(self, samdb, update_file, base_dir): + """Wrapper function for parsing an LDIF file and applying the updates""" + + print("Applying %s updates..." % update_file) + + ldif_file = None + try: + ldif_file = open(os.path.join(base_dir, update_file)) + + count = self._apply_updates_in_file(samdb, ldif_file) + + finally: + if ldif_file: + ldif_file.close() + + print("%u changes applied" % count) + + return count + + def run(self, **kwargs): + try: + from samba.ms_schema_markdown import read_ms_markdown + except ImportError as e: + self.outf.write("Exception in importing markdown: %s\n" % e) + raise CommandError('Failed to import module markdown') + from samba.schema import Schema + + updates_allowed_overridden = False + sambaopts = kwargs.get("sambaopts") + credopts = kwargs.get("credopts") + lp = sambaopts.get_loadparm() + creds = credopts.get_credentials(lp) + H = kwargs.get("H") + target_schema = kwargs.get("schema") + ldf_files = kwargs.get("ldf_file") + base_dir = kwargs.get("base_dir") + + temp_folder = None + + samdb = SamDB(url=H, session_info=system_session(), credentials=creds, lp=lp) + + # we're not going to get far if the config doesn't allow schema updates + if lp.get("dsdb:schema update allowed") is None: + lp.set("dsdb:schema update allowed", "yes") + print("Temporarily overriding 'dsdb:schema update allowed' setting") + updates_allowed_overridden = True + + own_dn = ldb.Dn(samdb, samdb.get_dsServiceName()) + master = get_fsmo_roleowner(samdb, str(samdb.get_schema_basedn()), + 'schema') + if own_dn != master: + raise CommandError("This server is not the schema master.") + + # if specific LDIF files were specified, just apply them + if ldf_files: + schema_updates = ldf_files.split(",") + else: + schema_updates = [] + + # work out the version of the target schema we're upgrading to + end = Schema.get_version(target_schema) + + # work out the version of the schema we're currently using + res = samdb.search(base=samdb.get_schema_basedn(), + scope=ldb.SCOPE_BASE, attrs=['objectVersion']) + + if len(res) != 1: + raise CommandError('Could not determine current schema version') + start = int(res[0]['objectVersion'][0]) + 1 + + diff_dir = setup_path("adprep/WindowsServerDocs") + if base_dir is None: + # Read from the Schema-Updates.md file + temp_folder = tempfile.mkdtemp() + + update_file = setup_path("adprep/WindowsServerDocs/Schema-Updates.md") + + try: + read_ms_markdown(update_file, temp_folder) + except Exception as e: + print("Exception in markdown parsing: %s" % e) + shutil.rmtree(temp_folder) + raise CommandError('Failed to upgrade schema') + 
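+            # At this point read_ms_markdown() has extracted the individual
+            # SchNN.ldf update files from Schema-Updates.md into temp_folder,
+            # which now serves as the source directory for the updates
+            # applied below.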
+ base_dir = temp_folder + + for version in range(start, end + 1): + update = 'Sch%d.ldf' % version + schema_updates.append(update) + + # Apply patches if we parsed the Schema-Updates.md file + diff = os.path.abspath(os.path.join(diff_dir, update + '.diff')) + if temp_folder and os.path.exists(diff): + try: + p = subprocess.Popen(['patch', update, '-i', diff], + stdout=subprocess.PIPE, + stderr=subprocess.PIPE, cwd=temp_folder) + except (OSError, IOError): + shutil.rmtree(temp_folder) + raise CommandError("Failed to upgrade schema. " + "Is '/usr/bin/patch' missing?") + + stdout, stderr = p.communicate() + + if p.returncode: + print("Exception in patch: %s\n%s" % (stdout, stderr)) + shutil.rmtree(temp_folder) + raise CommandError('Failed to upgrade schema') + + print("Patched %s using %s" % (update, diff)) + + if base_dir is None: + base_dir = setup_path("adprep") + + samdb.transaction_start() + count = 0 + error_encountered = False + + try: + # Apply the schema updates needed to move to the new schema version + for ldif_file in schema_updates: + count += self._apply_update(samdb, ldif_file, base_dir) + + if count > 0: + samdb.transaction_commit() + print("Schema successfully updated") + else: + print("No changes applied to schema") + samdb.transaction_cancel() + except Exception as e: + print("Exception: %s" % e) + print("Error encountered, aborting schema upgrade") + samdb.transaction_cancel() + error_encountered = True + + if updates_allowed_overridden: + lp.set("dsdb:schema update allowed", "no") + + if temp_folder: + shutil.rmtree(temp_folder) + + if error_encountered: + raise CommandError('Failed to upgrade schema') diff --git a/python/samba/netcmd/domain/tombstones.py b/python/samba/netcmd/domain/tombstones.py new file mode 100644 index 0000000..673bb9a --- /dev/null +++ b/python/samba/netcmd/domain/tombstones.py @@ -0,0 +1,116 @@ +# domain management - domain tombstones +# +# Copyright Matthias Dieter Wallnoefer 2009 +# Copyright Andrew Kroeger 2009 +# Copyright Jelmer Vernooij 2007-2012 +# Copyright Giampaolo Lauria 2011 +# Copyright Matthieu Patou 2011 +# Copyright Andrew Bartlett 2008-2015 +# Copyright Stefan Metzmacher 2012 +# +# This program is free software; you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation; either version 3 of the License, or +# (at your option) any later version. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with this program. If not, see . +# + +import time + +import ldb +import samba.getopt as options +from samba.auth import system_session +from samba.netcmd import Command, CommandError, Option, SuperCommand +from samba.samdb import SamDB + + +class cmd_domain_tombstones_expunge(Command): + """Expunge tombstones from the database. 
+ +This command expunges tombstones from the database.""" + synopsis = "%prog NC [NC [...]] [options]" + + takes_options = [ + Option("-H", "--URL", help="LDB URL for database or target server", type=str, + metavar="URL", dest="H"), + Option("--current-time", + help="The current time to evaluate the tombstone lifetime from, expressed as YYYY-MM-DD", + type=str), + Option("--tombstone-lifetime", help="Number of days a tombstone should be preserved for", type=int), + ] + + takes_args = ["nc*"] + + takes_optiongroups = { + "sambaopts": options.SambaOptions, + "credopts": options.CredentialsOptions, + "versionopts": options.VersionOptions, + } + + def run(self, *ncs, **kwargs): + sambaopts = kwargs.get("sambaopts") + credopts = kwargs.get("credopts") + H = kwargs.get("H") + current_time_string = kwargs.get("current_time") + tombstone_lifetime = kwargs.get("tombstone_lifetime") + lp = sambaopts.get_loadparm() + creds = credopts.get_credentials(lp) + samdb = SamDB(url=H, session_info=system_session(), + credentials=creds, lp=lp) + + if current_time_string is None and tombstone_lifetime is None: + print("Note: without --current-time or --tombstone-lifetime " + "only tombstones already scheduled for deletion will " + "be deleted.", file=self.outf) + print("To remove all tombstones, use --tombstone-lifetime=0.", + file=self.outf) + + if current_time_string is not None: + current_time_obj = time.strptime(current_time_string, "%Y-%m-%d") + current_time = int(time.mktime(current_time_obj)) + + else: + current_time = int(time.time()) + + if len(ncs) == 0: + res = samdb.search(expression="", base="", scope=ldb.SCOPE_BASE, + attrs=["namingContexts"]) + + ncs = [] + for nc in res[0]["namingContexts"]: + ncs.append(str(nc)) + else: + ncs = list(ncs) + + started_transaction = False + try: + samdb.transaction_start() + started_transaction = True + (removed_objects, + removed_links) = samdb.garbage_collect_tombstones(ncs, + current_time=current_time, + tombstone_lifetime=tombstone_lifetime) + + except Exception as err: + if started_transaction: + samdb.transaction_cancel() + raise CommandError("Failed to expunge / garbage collect tombstones", err) + + samdb.transaction_commit() + + self.outf.write("Removed %d objects and %d links successfully\n" + % (removed_objects, removed_links)) + + +class cmd_domain_tombstones(SuperCommand): + """Domain tombstone and recycled object management.""" + + subcommands = {} + subcommands["expunge"] = cmd_domain_tombstones_expunge() diff --git a/python/samba/netcmd/domain/trust.py b/python/samba/netcmd/domain/trust.py new file mode 100644 index 0000000..e930f00 --- /dev/null +++ b/python/samba/netcmd/domain/trust.py @@ -0,0 +1,2338 @@ +# domain management - domain trust +# +# Copyright Matthias Dieter Wallnoefer 2009 +# Copyright Andrew Kroeger 2009 +# Copyright Jelmer Vernooij 2007-2012 +# Copyright Giampaolo Lauria 2011 +# Copyright Matthieu Patou 2011 +# Copyright Andrew Bartlett 2008-2015 +# Copyright Stefan Metzmacher 2012 +# +# This program is free software; you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation; either version 3 of the License, or +# (at your option) any later version. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. 
+# +# You should have received a copy of the GNU General Public License +# along with this program. If not, see . +# + +import ctypes +from getpass import getpass + +import ldb +import samba.getopt as options +import samba.ntacls +from samba import NTSTATUSError, ntstatus, string_to_byte_array, werror +from samba.auth import system_session +from samba.dcerpc import drsblobs, lsa, nbt, netlogon, security +from samba.net import Net +from samba.netcmd import Command, CommandError, Option, SuperCommand +from samba.samdb import SamDB +from samba.trust_utils import CreateTrustedDomainRelax + + +class LocalDCCredentialsOptions(options.CredentialsOptions): + def __init__(self, parser): + options.CredentialsOptions.__init__(self, parser, special_name="local-dc") + + +class DomainTrustCommand(Command): + """List domain trusts.""" + + def __init__(self): + Command.__init__(self) + self.local_lp = None + + self.local_server = None + self.local_binding_string = None + self.local_creds = None + + self.remote_server = None + self.remote_binding_string = None + self.remote_creds = None + + def _uint32(self, v): + return ctypes.c_uint32(v).value + + def check_runtime_error(self, runtime, val): + if runtime is None: + return False + + err32 = self._uint32(runtime.args[0]) + if err32 == val: + return True + + return False + + class LocalRuntimeError(CommandError): + def __init__(exception_self, self, runtime, message): + err32 = self._uint32(runtime.args[0]) + errstr = runtime.args[1] + msg = "LOCAL_DC[%s]: %s - ERROR(0x%08X) - %s" % ( + self.local_server, message, err32, errstr) + CommandError.__init__(exception_self, msg) + + class RemoteRuntimeError(CommandError): + def __init__(exception_self, self, runtime, message): + err32 = self._uint32(runtime.args[0]) + errstr = runtime.args[1] + msg = "REMOTE_DC[%s]: %s - ERROR(0x%08X) - %s" % ( + self.remote_server, message, err32, errstr) + CommandError.__init__(exception_self, msg) + + class LocalLdbError(CommandError): + def __init__(exception_self, self, ldb_error, message): + errval = ldb_error.args[0] + errstr = ldb_error.args[1] + msg = "LOCAL_DC[%s]: %s - ERROR(%d) - %s" % ( + self.local_server, message, errval, errstr) + CommandError.__init__(exception_self, msg) + + def setup_local_server(self, sambaopts, localdcopts): + if self.local_server is not None: + return self.local_server + + lp = sambaopts.get_loadparm() + + local_server = localdcopts.ipaddress + if local_server is None: + server_role = lp.server_role() + if server_role != "ROLE_ACTIVE_DIRECTORY_DC": + raise CommandError("Invalid server_role %s" % (server_role)) + local_server = lp.get('netbios name') + local_transport = "ncalrpc" + local_binding_options = "" + local_binding_options += ",auth_type=ncalrpc_as_system" + local_ldap_url = None + local_creds = None + else: + local_transport = "ncacn_np" + local_binding_options = "" + local_ldap_url = "ldap://%s" % local_server + local_creds = localdcopts.get_credentials(lp) + + self.local_lp = lp + + self.local_server = local_server + self.local_binding_string = "%s:%s[%s]" % (local_transport, local_server, local_binding_options) + self.local_ldap_url = local_ldap_url + self.local_creds = local_creds + return self.local_server + + def new_local_lsa_connection(self): + return lsa.lsarpc(self.local_binding_string, self.local_lp, self.local_creds) + + def new_local_netlogon_connection(self): + return netlogon.netlogon(self.local_binding_string, self.local_lp, self.local_creds) + + def new_local_ldap_connection(self): + return 
SamDB(url=self.local_ldap_url, + session_info=system_session(), + credentials=self.local_creds, + lp=self.local_lp) + + def setup_remote_server(self, credopts, domain, + require_pdc=True, + require_writable=True): + + if require_pdc: + assert require_writable + + if self.remote_server is not None: + return self.remote_server + + self.remote_server = "__unknown__remote_server__.%s" % domain + assert self.local_server is not None + + remote_creds = credopts.get_credentials(self.local_lp) + remote_server = credopts.ipaddress + remote_binding_options = "" + + # TODO: we should also support NT4 domains + # we could use local_netlogon.netr_DsRGetDCNameEx2() with the remote domain name + # and delegate NBT or CLDAP to the local netlogon server + try: + remote_net = Net(remote_creds, self.local_lp, server=remote_server) + remote_flags = nbt.NBT_SERVER_LDAP | nbt.NBT_SERVER_DS + if require_writable: + remote_flags |= nbt.NBT_SERVER_WRITABLE + if require_pdc: + remote_flags |= nbt.NBT_SERVER_PDC + remote_info = remote_net.finddc(flags=remote_flags, domain=domain, address=remote_server) + except NTSTATUSError as error: + raise CommandError("Failed to find a writeable DC for domain '%s': %s" % + (domain, error.args[1])) + except Exception: + raise CommandError("Failed to find a writeable DC for domain '%s'" % domain) + flag_map = { + nbt.NBT_SERVER_PDC: "PDC", + nbt.NBT_SERVER_GC: "GC", + nbt.NBT_SERVER_LDAP: "LDAP", + nbt.NBT_SERVER_DS: "DS", + nbt.NBT_SERVER_KDC: "KDC", + nbt.NBT_SERVER_TIMESERV: "TIMESERV", + nbt.NBT_SERVER_CLOSEST: "CLOSEST", + nbt.NBT_SERVER_WRITABLE: "WRITABLE", + nbt.NBT_SERVER_GOOD_TIMESERV: "GOOD_TIMESERV", + nbt.NBT_SERVER_NDNC: "NDNC", + nbt.NBT_SERVER_SELECT_SECRET_DOMAIN_6: "SELECT_SECRET_DOMAIN_6", + nbt.NBT_SERVER_FULL_SECRET_DOMAIN_6: "FULL_SECRET_DOMAIN_6", + nbt.NBT_SERVER_ADS_WEB_SERVICE: "ADS_WEB_SERVICE", + nbt.NBT_SERVER_DS_8: "DS_8", + nbt.NBT_SERVER_DS_9: "DS_9", + nbt.NBT_SERVER_DS_10: "DS_10", + nbt.NBT_SERVER_HAS_DNS_NAME: "HAS_DNS_NAME", + nbt.NBT_SERVER_IS_DEFAULT_NC: "IS_DEFAULT_NC", + nbt.NBT_SERVER_FOREST_ROOT: "FOREST_ROOT", + } + server_type_string = self.generic_bitmap_to_string(flag_map, + remote_info.server_type, names_only=True) + self.outf.write("RemoteDC Netbios[%s] DNS[%s] ServerType[%s]\n" % ( + remote_info.pdc_name, + remote_info.pdc_dns_name, + server_type_string)) + + self.remote_server = remote_info.pdc_dns_name + self.remote_binding_string = "ncacn_np:%s[%s]" % (self.remote_server, remote_binding_options) + self.remote_creds = remote_creds + return self.remote_server + + def new_remote_lsa_connection(self): + return lsa.lsarpc(self.remote_binding_string, self.local_lp, self.remote_creds) + + def new_remote_netlogon_connection(self): + return netlogon.netlogon(self.remote_binding_string, self.local_lp, self.remote_creds) + + def get_lsa_info(self, conn, policy_access): + objectAttr = lsa.ObjectAttribute() + objectAttr.sec_qos = lsa.QosInfo() + + policy = conn.OpenPolicy2(b''.decode('utf-8'), + objectAttr, policy_access) + + info = conn.QueryInfoPolicy2(policy, lsa.LSA_POLICY_INFO_DNS) + + return (policy, info) + + def get_netlogon_dc_unc(self, conn, server, domain): + try: + info = conn.netr_DsRGetDCNameEx2(server, + None, 0, None, None, None, + netlogon.DS_RETURN_DNS_NAME) + return info.dc_unc + except RuntimeError: + return conn.netr_GetDcName(server, domain) + + def get_netlogon_dc_info(self, conn, server): + info = conn.netr_DsRGetDCNameEx2(server, + None, 0, None, None, None, + netlogon.DS_RETURN_DNS_NAME) + return info + + def 
netr_DomainTrust_to_name(self, t): + if t.trust_type == lsa.LSA_TRUST_TYPE_DOWNLEVEL: + return t.netbios_name + + return t.dns_name + + def netr_DomainTrust_to_type(self, a, t): + primary = None + primary_parent = None + for _t in a: + if _t.trust_flags & netlogon.NETR_TRUST_FLAG_PRIMARY: + primary = _t + if not _t.trust_flags & netlogon.NETR_TRUST_FLAG_TREEROOT: + primary_parent = a[_t.parent_index] + break + + if t.trust_flags & netlogon.NETR_TRUST_FLAG_IN_FOREST: + if t is primary_parent: + return "Parent" + + if t.trust_flags & netlogon.NETR_TRUST_FLAG_TREEROOT: + return "TreeRoot" + + parent = a[t.parent_index] + if parent is primary: + return "Child" + + return "Shortcut" + + if t.trust_attributes & lsa.LSA_TRUST_ATTRIBUTE_FOREST_TRANSITIVE: + return "Forest" + + return "External" + + def netr_DomainTrust_to_transitive(self, t): + if t.trust_flags & netlogon.NETR_TRUST_FLAG_IN_FOREST: + return "Yes" + + if t.trust_attributes & lsa.LSA_TRUST_ATTRIBUTE_NON_TRANSITIVE: + return "No" + + if t.trust_attributes & lsa.LSA_TRUST_ATTRIBUTE_FOREST_TRANSITIVE: + return "Yes" + + return "No" + + def netr_DomainTrust_to_direction(self, t): + if t.trust_flags & netlogon.NETR_TRUST_FLAG_INBOUND and \ + t.trust_flags & netlogon.NETR_TRUST_FLAG_OUTBOUND: + return "BOTH" + + if t.trust_flags & netlogon.NETR_TRUST_FLAG_INBOUND: + return "INCOMING" + + if t.trust_flags & netlogon.NETR_TRUST_FLAG_OUTBOUND: + return "OUTGOING" + + return "INVALID" + + def generic_enum_to_string(self, e_dict, v, names_only=False): + try: + w = e_dict[v] + except KeyError: + v32 = self._uint32(v) + w = "__unknown__%08X__" % v32 + + r = "0x%x (%s)" % (v, w) + return r + + def generic_bitmap_to_string(self, b_dict, v, names_only=False): + + s = [] + + c = v + for b in sorted(b_dict.keys()): + if not (c & b): + continue + c &= ~b + s += [b_dict[b]] + + if c != 0: + c32 = self._uint32(c) + s += ["__unknown_%08X__" % c32] + + w = ",".join(s) + if names_only: + return w + r = "0x%x (%s)" % (v, w) + return r + + def trustType_string(self, v): + types = { + lsa.LSA_TRUST_TYPE_DOWNLEVEL: "DOWNLEVEL", + lsa.LSA_TRUST_TYPE_UPLEVEL: "UPLEVEL", + lsa.LSA_TRUST_TYPE_MIT: "MIT", + lsa.LSA_TRUST_TYPE_DCE: "DCE", + } + return self.generic_enum_to_string(types, v) + + def trustDirection_string(self, v): + directions = { + lsa.LSA_TRUST_DIRECTION_INBOUND | + lsa.LSA_TRUST_DIRECTION_OUTBOUND: "BOTH", + lsa.LSA_TRUST_DIRECTION_INBOUND: "INBOUND", + lsa.LSA_TRUST_DIRECTION_OUTBOUND: "OUTBOUND", + } + return self.generic_enum_to_string(directions, v) + + def trustAttributes_string(self, v): + attributes = { + lsa.LSA_TRUST_ATTRIBUTE_NON_TRANSITIVE: "NON_TRANSITIVE", + lsa.LSA_TRUST_ATTRIBUTE_UPLEVEL_ONLY: "UPLEVEL_ONLY", + lsa.LSA_TRUST_ATTRIBUTE_QUARANTINED_DOMAIN: "QUARANTINED_DOMAIN", + lsa.LSA_TRUST_ATTRIBUTE_FOREST_TRANSITIVE: "FOREST_TRANSITIVE", + lsa.LSA_TRUST_ATTRIBUTE_CROSS_ORGANIZATION: "CROSS_ORGANIZATION", + lsa.LSA_TRUST_ATTRIBUTE_WITHIN_FOREST: "WITHIN_FOREST", + lsa.LSA_TRUST_ATTRIBUTE_TREAT_AS_EXTERNAL: "TREAT_AS_EXTERNAL", + lsa.LSA_TRUST_ATTRIBUTE_USES_RC4_ENCRYPTION: "USES_RC4_ENCRYPTION", + } + return self.generic_bitmap_to_string(attributes, v) + + def kerb_EncTypes_string(self, v): + enctypes = { + security.KERB_ENCTYPE_DES_CBC_CRC: "DES_CBC_CRC", + security.KERB_ENCTYPE_DES_CBC_MD5: "DES_CBC_MD5", + security.KERB_ENCTYPE_RC4_HMAC_MD5: "RC4_HMAC_MD5", + security.KERB_ENCTYPE_AES128_CTS_HMAC_SHA1_96: "AES128_CTS_HMAC_SHA1_96", + security.KERB_ENCTYPE_AES256_CTS_HMAC_SHA1_96: "AES256_CTS_HMAC_SHA1_96", + 
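+            # Note: the bits below are not plain Kerberos encryption types
+            # but session-key and feature flags that share the same
+            # msDS-SupportedEncryptionTypes bitmap on the trusted domain
+            # object.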
security.KERB_ENCTYPE_AES256_CTS_HMAC_SHA1_96_SK: "AES256_CTS_HMAC_SHA1_96-SK", + security.KERB_ENCTYPE_FAST_SUPPORTED: "FAST_SUPPORTED", + security.KERB_ENCTYPE_COMPOUND_IDENTITY_SUPPORTED: "COMPOUND_IDENTITY_SUPPORTED", + security.KERB_ENCTYPE_CLAIMS_SUPPORTED: "CLAIMS_SUPPORTED", + security.KERB_ENCTYPE_RESOURCE_SID_COMPRESSION_DISABLED: "RESOURCE_SID_COMPRESSION_DISABLED", + } + return self.generic_bitmap_to_string(enctypes, v) + + def entry_tln_status(self, e_flags, ): + if e_flags == 0: + return "Status[Enabled]" + + flags = { + lsa.LSA_TLN_DISABLED_NEW: "Disabled-New", + lsa.LSA_TLN_DISABLED_ADMIN: "Disabled", + lsa.LSA_TLN_DISABLED_CONFLICT: "Disabled-Conflicting", + } + return "Status[%s]" % self.generic_bitmap_to_string(flags, e_flags, names_only=True) + + def entry_dom_status(self, e_flags): + if e_flags == 0: + return "Status[Enabled]" + + flags = { + lsa.LSA_SID_DISABLED_ADMIN: "Disabled-SID", + lsa.LSA_SID_DISABLED_CONFLICT: "Disabled-SID-Conflicting", + lsa.LSA_NB_DISABLED_ADMIN: "Disabled-NB", + lsa.LSA_NB_DISABLED_CONFLICT: "Disabled-NB-Conflicting", + } + return "Status[%s]" % self.generic_bitmap_to_string(flags, e_flags, names_only=True) + + def write_forest_trust_info(self, fti, tln=None, collisions=None): + if tln is not None: + tln_string = " TDO[%s]" % tln + else: + tln_string = "" + + self.outf.write("Namespaces[%d]%s:\n" % ( + len(fti.entries), tln_string)) + + for i, e in enumerate(fti.entries): + + flags = e.flags + collision_string = "" + + if collisions is not None: + for c in collisions.entries: + if c.index != i: + continue + flags = c.flags + collision_string = " Collision[%s]" % (c.name.string) + + d = e.forest_trust_data + if e.type == lsa.LSA_FOREST_TRUST_TOP_LEVEL_NAME: + self.outf.write("TLN: %-32s DNS[*.%s]%s\n" % ( + self.entry_tln_status(flags), + d.string, collision_string)) + elif e.type == lsa.LSA_FOREST_TRUST_TOP_LEVEL_NAME_EX: + self.outf.write("TLN_EX: %-29s DNS[*.%s]\n" % ( + "", d.string)) + elif e.type == lsa.LSA_FOREST_TRUST_DOMAIN_INFO: + self.outf.write("DOM: %-32s DNS[%s] Netbios[%s] SID[%s]%s\n" % ( + self.entry_dom_status(flags), + d.dns_domain_name.string, + d.netbios_domain_name.string, + d.domain_sid, collision_string)) + return + + +class cmd_domain_trust_list(DomainTrustCommand): + """List domain trusts.""" + + synopsis = "%prog [options]" + + takes_optiongroups = { + "sambaopts": options.SambaOptions, + "versionopts": options.VersionOptions, + "localdcopts": LocalDCCredentialsOptions, + } + + takes_options = [ + ] + + def run(self, sambaopts=None, versionopts=None, localdcopts=None): + + local_server = self.setup_local_server(sambaopts, localdcopts) + try: + local_netlogon = self.new_local_netlogon_connection() + except RuntimeError as error: + raise self.LocalRuntimeError(self, error, "failed to connect netlogon server") + + try: + local_netlogon_trusts = \ + local_netlogon.netr_DsrEnumerateDomainTrusts(local_server, + netlogon.NETR_TRUST_FLAG_IN_FOREST | + netlogon.NETR_TRUST_FLAG_OUTBOUND | + netlogon.NETR_TRUST_FLAG_INBOUND) + except RuntimeError as error: + if self.check_runtime_error(error, werror.WERR_RPC_S_PROCNUM_OUT_OF_RANGE): + # TODO: we could implement a fallback to lsa.EnumTrustDom() + raise CommandError("LOCAL_DC[%s]: netr_DsrEnumerateDomainTrusts not supported." 
% ( + local_server)) + raise self.LocalRuntimeError(self, error, "netr_DsrEnumerateDomainTrusts failed") + + a = local_netlogon_trusts.array + for t in a: + if t.trust_flags & netlogon.NETR_TRUST_FLAG_PRIMARY: + continue + self.outf.write("%-14s %-15s %-19s %s\n" % ( + "Type[%s]" % self.netr_DomainTrust_to_type(a, t), + "Transitive[%s]" % self.netr_DomainTrust_to_transitive(t), + "Direction[%s]" % self.netr_DomainTrust_to_direction(t), + "Name[%s]" % self.netr_DomainTrust_to_name(t))) + return + + +class cmd_domain_trust_show(DomainTrustCommand): + """Show trusted domain details.""" + + synopsis = "%prog NAME [options]" + + takes_optiongroups = { + "sambaopts": options.SambaOptions, + "versionopts": options.VersionOptions, + "localdcopts": LocalDCCredentialsOptions, + } + + takes_options = [ + ] + + takes_args = ["domain"] + + def run(self, domain, sambaopts=None, versionopts=None, localdcopts=None): + + self.setup_local_server(sambaopts, localdcopts) + try: + local_lsa = self.new_local_lsa_connection() + except RuntimeError as error: + raise self.LocalRuntimeError(self, error, "failed to connect lsa server") + + try: + local_policy_access = lsa.LSA_POLICY_VIEW_LOCAL_INFORMATION + (local_policy, local_lsa_info) = self.get_lsa_info(local_lsa, local_policy_access) + except RuntimeError as error: + raise self.LocalRuntimeError(self, error, "failed to query LSA_POLICY_INFO_DNS") + + self.outf.write("LocalDomain Netbios[%s] DNS[%s] SID[%s]\n" % ( + local_lsa_info.name.string, + local_lsa_info.dns_domain.string, + local_lsa_info.sid)) + + lsaString = lsa.String() + lsaString.string = domain + try: + local_tdo_full = \ + local_lsa.QueryTrustedDomainInfoByName(local_policy, + lsaString, + lsa.LSA_TRUSTED_DOMAIN_INFO_FULL_INFO) + local_tdo_info = local_tdo_full.info_ex + local_tdo_posix = local_tdo_full.posix_offset + except NTSTATUSError as error: + if self.check_runtime_error(error, ntstatus.NT_STATUS_OBJECT_NAME_NOT_FOUND): + raise CommandError("trusted domain object does not exist for domain [%s]" % domain) + + raise self.LocalRuntimeError(self, error, "QueryTrustedDomainInfoByName(FULL_INFO) failed") + + try: + local_tdo_enctypes = \ + local_lsa.QueryTrustedDomainInfoByName(local_policy, + lsaString, + lsa.LSA_TRUSTED_DOMAIN_SUPPORTED_ENCRYPTION_TYPES) + except NTSTATUSError as error: + if self.check_runtime_error(error, ntstatus.NT_STATUS_INVALID_PARAMETER): + error = None + if self.check_runtime_error(error, ntstatus.NT_STATUS_INVALID_INFO_CLASS): + error = None + + if error is not None: + raise self.LocalRuntimeError(self, error, + "QueryTrustedDomainInfoByName(SUPPORTED_ENCRYPTION_TYPES) failed") + + local_tdo_enctypes = lsa.TrustDomainInfoSupportedEncTypes() + local_tdo_enctypes.enc_types = 0 + + try: + local_tdo_forest = None + if local_tdo_info.trust_attributes & lsa.LSA_TRUST_ATTRIBUTE_FOREST_TRANSITIVE: + local_tdo_forest = \ + local_lsa.lsaRQueryForestTrustInformation(local_policy, + lsaString, + lsa.LSA_FOREST_TRUST_DOMAIN_INFO) + except RuntimeError as error: + if self.check_runtime_error(error, ntstatus.NT_STATUS_RPC_PROCNUM_OUT_OF_RANGE): + error = None + if self.check_runtime_error(error, ntstatus.NT_STATUS_NOT_FOUND): + error = None + if error is not None: + raise self.LocalRuntimeError(self, error, "lsaRQueryForestTrustInformation failed") + + local_tdo_forest = lsa.ForestTrustInformation() + local_tdo_forest.count = 0 + local_tdo_forest.entries = [] + + self.outf.write("TrustedDomain:\n\n") + self.outf.write("NetbiosName: %s\n" % local_tdo_info.netbios_name.string) + if 
local_tdo_info.netbios_name.string != local_tdo_info.domain_name.string:
+            self.outf.write("DnsName: %s\n" % local_tdo_info.domain_name.string)
+        self.outf.write("SID: %s\n" % local_tdo_info.sid)
+        self.outf.write("Type: %s\n" % self.trustType_string(local_tdo_info.trust_type))
+        self.outf.write("Direction: %s\n" % self.trustDirection_string(local_tdo_info.trust_direction))
+        self.outf.write("Attributes: %s\n" % self.trustAttributes_string(local_tdo_info.trust_attributes))
+        posix_offset_u32 = ctypes.c_uint32(local_tdo_posix.posix_offset).value
+        posix_offset_i32 = ctypes.c_int32(local_tdo_posix.posix_offset).value
+        self.outf.write("PosixOffset: 0x%08X (%d)\n" % (posix_offset_u32, posix_offset_i32))
+        self.outf.write("kerb_EncTypes: %s\n" % self.kerb_EncTypes_string(local_tdo_enctypes.enc_types))
+
+        if local_tdo_info.trust_attributes & lsa.LSA_TRUST_ATTRIBUTE_FOREST_TRANSITIVE:
+            self.write_forest_trust_info(local_tdo_forest,
+                                         tln=local_tdo_info.domain_name.string)
+
+        return
+
+
+class cmd_domain_trust_modify(DomainTrustCommand):
+    """Modify trusted domain details."""
+
+    synopsis = "%prog NAME [options]"
+
+    takes_optiongroups = {
+        "sambaopts": options.SambaOptions,
+        "versionopts": options.VersionOptions,
+        "localdcopts": LocalDCCredentialsOptions,
+    }
+
+    takes_options = [
+        Option("--use-aes-keys", action="store_true",
+               help="The trust uses AES kerberos keys.",
+               dest='use_aes_keys',
+               default=None),
+        Option("--no-aes-keys", action="store_true",
+               help="The trust does not have any support for AES kerberos keys.",
+               dest='disable_aes_keys',
+               default=None),
+        Option("--raw-kerb-enctypes", action="store",
+               help="The raw kerberos enctype bits",
+               dest='kerb_enctypes',
+               default=None),
+    ]
+
+    takes_args = ["domain"]
+
+    def run(self, domain, sambaopts=None, versionopts=None, localdcopts=None,
+            disable_aes_keys=None, use_aes_keys=None, kerb_enctypes=None):
+
+        num_modifications = 0
+
+        enctype_args = 0
+        if kerb_enctypes is not None:
+            enctype_args += 1
+        if use_aes_keys is not None:
+            enctype_args += 1
+        if disable_aes_keys is not None:
+            enctype_args += 1
+        if enctype_args > 1:
+            raise CommandError("--no-aes-keys, --use-aes-keys and --raw-kerb-enctypes are mutually exclusive")
+        if enctype_args == 1:
+            num_modifications += 1
+
+        if num_modifications == 0:
+            raise CommandError("modification arguments are required, try --help")
+
+        self.setup_local_server(sambaopts, localdcopts)
+        try:
+            local_lsa = self.new_local_lsa_connection()
+        except RuntimeError as error:
+            raise self.LocalRuntimeError(self, error, "failed to connect to lsa server")
+
+        try:
+            local_policy_access = lsa.LSA_POLICY_VIEW_LOCAL_INFORMATION
+            local_policy_access |= lsa.LSA_POLICY_TRUST_ADMIN
+            (local_policy, local_lsa_info) = self.get_lsa_info(local_lsa, local_policy_access)
+        except RuntimeError as error:
+            raise self.LocalRuntimeError(self, error, "failed to query LSA_POLICY_INFO_DNS")
+
+        self.outf.write("LocalDomain Netbios[%s] DNS[%s] SID[%s]\n" % (
+            local_lsa_info.name.string,
+            local_lsa_info.dns_domain.string,
+            local_lsa_info.sid))
+
+        if enctype_args == 1:
+            lsaString = lsa.String()
+            lsaString.string = domain
+
+            try:
+                local_tdo_enctypes = \
+                    local_lsa.QueryTrustedDomainInfoByName(local_policy,
+                                                           lsaString,
+                                                           lsa.LSA_TRUSTED_DOMAIN_SUPPORTED_ENCRYPTION_TYPES)
+            except NTSTATUSError as error:
+                if self.check_runtime_error(error, ntstatus.NT_STATUS_INVALID_PARAMETER):
+                    error = None
+                if self.check_runtime_error(error, ntstatus.NT_STATUS_INVALID_INFO_CLASS):
+                    error = None
+
+                if error is not None:
+                    raise self.LocalRuntimeError(self, error,
+                                                 "QueryTrustedDomainInfoByName(SUPPORTED_ENCRYPTION_TYPES) failed")
+
+                local_tdo_enctypes = lsa.TrustDomainInfoSupportedEncTypes()
+                local_tdo_enctypes.enc_types = 0
+
+            self.outf.write("Old kerb_EncTypes: %s\n" % self.kerb_EncTypes_string(local_tdo_enctypes.enc_types))
+
+            enc_types = lsa.TrustDomainInfoSupportedEncTypes()
+            if kerb_enctypes is not None:
+                enc_types.enc_types = int(kerb_enctypes, base=0)
+            elif use_aes_keys is not None:
+                enc_types.enc_types = security.KERB_ENCTYPE_AES128_CTS_HMAC_SHA1_96
+                enc_types.enc_types |= security.KERB_ENCTYPE_AES256_CTS_HMAC_SHA1_96
+            elif disable_aes_keys is not None:
+                # CVE-2022-37966: Trust objects are no longer assumed to support
+                # RC4, so we must indicate support explicitly.
+                enc_types.enc_types = security.KERB_ENCTYPE_RC4_HMAC_MD5
+            else:
+                raise CommandError("Internal error: the enctype arguments should have been checked above")
+
+            if enc_types.enc_types != local_tdo_enctypes.enc_types:
+                try:
+                    local_tdo_enctypes = \
+                        local_lsa.SetTrustedDomainInfoByName(local_policy,
+                                                             lsaString,
+                                                             lsa.LSA_TRUSTED_DOMAIN_SUPPORTED_ENCRYPTION_TYPES,
+                                                             enc_types)
+                    self.outf.write("New kerb_EncTypes: %s\n" % self.kerb_EncTypes_string(enc_types.enc_types))
+                except NTSTATUSError as error:
+                    if error is not None:
+                        raise self.LocalRuntimeError(self, error,
+                                                     "SetTrustedDomainInfoByName(SUPPORTED_ENCRYPTION_TYPES) failed")
+            else:
+                self.outf.write("No kerb_EncTypes update needed\n")
+
+        return
+
+
+class cmd_domain_trust_create(DomainTrustCommand):
+    """Create a domain or forest trust."""
+
+    synopsis = "%prog DOMAIN [options]"
+
+    takes_optiongroups = {
+        "sambaopts": options.SambaOptions,
+        "versionopts": options.VersionOptions,
+        "credopts": options.CredentialsOptions,
+        "localdcopts": LocalDCCredentialsOptions,
+    }
+
+    takes_options = [
+        Option("--type", type="choice", metavar="TYPE",
+               choices=["external", "forest"],
+               help="The type of the trust: 'external' or 'forest'.",
+               dest='trust_type',
+               default="external"),
+        Option("--direction", type="choice", metavar="DIRECTION",
+               choices=["incoming", "outgoing", "both"],
+               help="The trust direction: 'incoming', 'outgoing' or 'both'.",
+               dest='trust_direction',
+               default="both"),
+        Option("--create-location", type="choice", metavar="LOCATION",
+               choices=["local", "both"],
+               help="Where to create the trusted domain object: 'local' or 'both'.",
+               dest='create_location',
+               default="both"),
+        Option("--cross-organisation", action="store_true",
+               help="The related domains do not belong to the same organisation.",
+               dest='cross_organisation',
+               default=False),
+        Option("--quarantined", type="choice", metavar="yes|no",
+               choices=["yes", "no", None],
+               help="Special SID filtering rules are applied to the trust. "
+                    "With --type=external the default is yes. "
+                    "With --type=forest the default is no.",
+               dest='quarantined_arg',
+               default=None),
+        Option("--not-transitive", action="store_true",
+               help="The forest trust is not transitive.",
+               dest='not_transitive',
+               default=False),
+        Option("--treat-as-external", action="store_true",
+               help="Treat the forest trust as external.",
+               dest='treat_as_external',
+               default=False),
+        Option("--no-aes-keys", action="store_false",
+               help="The trust does not use AES kerberos keys.",
+               dest='use_aes_keys',
+               default=True),
+        Option("--skip-validation", action="store_false",
+               help="Skip validation of the trust.",
+               dest='validate',
+               default=True),
+    ]
+
+    takes_args = ["domain"]
+
+    def run(self, domain, sambaopts=None, localdcopts=None, credopts=None, versionopts=None,
+            trust_type=None, trust_direction=None, create_location=None,
+            cross_organisation=False, quarantined_arg=None,
+            not_transitive=False, treat_as_external=False,
+            use_aes_keys=False, validate=True):
+
+        lsaString = lsa.String()
+
+        quarantined = False
+        if quarantined_arg is None:
+            if trust_type == 'external':
+                quarantined = True
+        elif quarantined_arg == 'yes':
+            quarantined = True
+
+        if trust_type != 'forest':
+            if not_transitive:
+                raise CommandError("--not-transitive requires --type=forest")
+            if treat_as_external:
+                raise CommandError("--treat-as-external requires --type=forest")
+
+        enc_types = lsa.TrustDomainInfoSupportedEncTypes()
+        if use_aes_keys:
+            enc_types.enc_types = security.KERB_ENCTYPE_AES128_CTS_HMAC_SHA1_96
+            enc_types.enc_types |= security.KERB_ENCTYPE_AES256_CTS_HMAC_SHA1_96
+        else:
+            # CVE-2022-37966: Trust objects are no longer assumed to support
+            # RC4, so we must indicate support explicitly.
+            enc_types.enc_types = security.KERB_ENCTYPE_RC4_HMAC_MD5
+
+        local_policy_access = lsa.LSA_POLICY_VIEW_LOCAL_INFORMATION
+        local_policy_access |= lsa.LSA_POLICY_TRUST_ADMIN
+        local_policy_access |= lsa.LSA_POLICY_CREATE_SECRET
+
+        local_trust_info = lsa.TrustDomainInfoInfoEx()
+        local_trust_info.trust_type = lsa.LSA_TRUST_TYPE_UPLEVEL
+        local_trust_info.trust_direction = 0
+        if trust_direction == "both":
+            local_trust_info.trust_direction |= lsa.LSA_TRUST_DIRECTION_INBOUND
+            local_trust_info.trust_direction |= lsa.LSA_TRUST_DIRECTION_OUTBOUND
+        elif trust_direction == "incoming":
+            local_trust_info.trust_direction |= lsa.LSA_TRUST_DIRECTION_INBOUND
+        elif trust_direction == "outgoing":
+            local_trust_info.trust_direction |= lsa.LSA_TRUST_DIRECTION_OUTBOUND
+        local_trust_info.trust_attributes = 0
+        if cross_organisation:
+            local_trust_info.trust_attributes |= lsa.LSA_TRUST_ATTRIBUTE_CROSS_ORGANIZATION
+        if quarantined:
+            local_trust_info.trust_attributes |= lsa.LSA_TRUST_ATTRIBUTE_QUARANTINED_DOMAIN
+        if trust_type == "forest":
+            local_trust_info.trust_attributes |= lsa.LSA_TRUST_ATTRIBUTE_FOREST_TRANSITIVE
+        if not_transitive:
+            local_trust_info.trust_attributes |= lsa.LSA_TRUST_ATTRIBUTE_NON_TRANSITIVE
+        if treat_as_external:
+            local_trust_info.trust_attributes |= lsa.LSA_TRUST_ATTRIBUTE_TREAT_AS_EXTERNAL
+
+        def get_password(name):
+            password = None
+            while True:
+                if password is not None and password != '':
+                    return password
+                password = getpass("New %s Password: " % name)
+                passwordverify = getpass("Retype %s Password: " % name)
+                if not password == passwordverify:
+                    password = None
+                    self.outf.write("Sorry, passwords do not match.\n")
+
+        incoming_secret = None
+        outgoing_secret = None
+        remote_policy_access = lsa.LSA_POLICY_VIEW_LOCAL_INFORMATION
+        if create_location == "local":
+            if
local_trust_info.trust_direction & lsa.LSA_TRUST_DIRECTION_INBOUND: + incoming_password = get_password("Incoming Trust") + incoming_secret = string_to_byte_array(incoming_password.encode('utf-16-le')) + if local_trust_info.trust_direction & lsa.LSA_TRUST_DIRECTION_OUTBOUND: + outgoing_password = get_password("Outgoing Trust") + outgoing_secret = string_to_byte_array(outgoing_password.encode('utf-16-le')) + + remote_trust_info = None + else: + # We use 240 random bytes. + # Windows uses 28 or 240 random bytes. I guess it's + # based on the trust type external vs. forest. + # + # The initial trust password can be up to 512 bytes + # while the versioned passwords used for periodic updates + # can only be up to 498 bytes, as netr_ServerPasswordSet2() + # needs to pass the NL_PASSWORD_VERSION structure within the + # 512 bytes and a 2 bytes confounder is required. + # + def random_trust_secret(length): + pw = samba.generate_random_machine_password(length // 2, length // 2) + return string_to_byte_array(pw.encode('utf-16-le')) + + if local_trust_info.trust_direction & lsa.LSA_TRUST_DIRECTION_INBOUND: + incoming_secret = random_trust_secret(240) + if local_trust_info.trust_direction & lsa.LSA_TRUST_DIRECTION_OUTBOUND: + outgoing_secret = random_trust_secret(240) + + remote_policy_access |= lsa.LSA_POLICY_TRUST_ADMIN + remote_policy_access |= lsa.LSA_POLICY_CREATE_SECRET + + remote_trust_info = lsa.TrustDomainInfoInfoEx() + remote_trust_info.trust_type = lsa.LSA_TRUST_TYPE_UPLEVEL + remote_trust_info.trust_direction = 0 + if trust_direction == "both": + remote_trust_info.trust_direction |= lsa.LSA_TRUST_DIRECTION_INBOUND + remote_trust_info.trust_direction |= lsa.LSA_TRUST_DIRECTION_OUTBOUND + elif trust_direction == "incoming": + remote_trust_info.trust_direction |= lsa.LSA_TRUST_DIRECTION_OUTBOUND + elif trust_direction == "outgoing": + remote_trust_info.trust_direction |= lsa.LSA_TRUST_DIRECTION_INBOUND + remote_trust_info.trust_attributes = 0 + if cross_organisation: + remote_trust_info.trust_attributes |= lsa.LSA_TRUST_ATTRIBUTE_CROSS_ORGANIZATION + if quarantined: + remote_trust_info.trust_attributes |= lsa.LSA_TRUST_ATTRIBUTE_QUARANTINED_DOMAIN + if trust_type == "forest": + remote_trust_info.trust_attributes |= lsa.LSA_TRUST_ATTRIBUTE_FOREST_TRANSITIVE + if not_transitive: + remote_trust_info.trust_attributes |= lsa.LSA_TRUST_ATTRIBUTE_NON_TRANSITIVE + if treat_as_external: + remote_trust_info.trust_attributes |= lsa.LSA_TRUST_ATTRIBUTE_TREAT_AS_EXTERNAL + + local_server = self.setup_local_server(sambaopts, localdcopts) + try: + local_lsa = self.new_local_lsa_connection() + except RuntimeError as error: + raise self.LocalRuntimeError(self, error, "failed to connect lsa server") + + try: + (local_policy, local_lsa_info) = self.get_lsa_info(local_lsa, local_policy_access) + except RuntimeError as error: + raise self.LocalRuntimeError(self, error, "failed to query LSA_POLICY_INFO_DNS") + + self.outf.write("LocalDomain Netbios[%s] DNS[%s] SID[%s]\n" % ( + local_lsa_info.name.string, + local_lsa_info.dns_domain.string, + local_lsa_info.sid)) + + try: + remote_server = self.setup_remote_server(credopts, domain) + except RuntimeError as error: + raise self.RemoteRuntimeError(self, error, "failed to locate remote server") + + try: + remote_lsa = self.new_remote_lsa_connection() + except RuntimeError as error: + raise self.RemoteRuntimeError(self, error, "failed to connect lsa server") + + try: + (remote_policy, remote_lsa_info) = self.get_lsa_info(remote_lsa, remote_policy_access) + except 
RuntimeError as error:
+            raise self.RemoteRuntimeError(self, error, "failed to query LSA_POLICY_INFO_DNS")
+
+        self.outf.write("RemoteDomain Netbios[%s] DNS[%s] SID[%s]\n" % (
+            remote_lsa_info.name.string,
+            remote_lsa_info.dns_domain.string,
+            remote_lsa_info.sid))
+
+        local_trust_info.domain_name.string = remote_lsa_info.dns_domain.string
+        local_trust_info.netbios_name.string = remote_lsa_info.name.string
+        local_trust_info.sid = remote_lsa_info.sid
+
+        if remote_trust_info:
+            remote_trust_info.domain_name.string = local_lsa_info.dns_domain.string
+            remote_trust_info.netbios_name.string = local_lsa_info.name.string
+            remote_trust_info.sid = local_lsa_info.sid
+
+        try:
+            lsaString.string = local_trust_info.domain_name.string
+            local_lsa.QueryTrustedDomainInfoByName(local_policy,
+                                                   lsaString,
+                                                   lsa.LSA_TRUSTED_DOMAIN_INFO_FULL_INFO)
+            raise CommandError("TrustedDomain %s already exists" % lsaString.string)
+        except NTSTATUSError as error:
+            if not self.check_runtime_error(error, ntstatus.NT_STATUS_OBJECT_NAME_NOT_FOUND):
+                raise self.LocalRuntimeError(self, error,
+                                             "QueryTrustedDomainInfoByName(%s, FULL_INFO) failed" % (
+                                                 lsaString.string))
+
+        try:
+            lsaString.string = local_trust_info.netbios_name.string
+            local_lsa.QueryTrustedDomainInfoByName(local_policy,
+                                                   lsaString,
+                                                   lsa.LSA_TRUSTED_DOMAIN_INFO_FULL_INFO)
+            raise CommandError("TrustedDomain %s already exists" % lsaString.string)
+        except NTSTATUSError as error:
+            if not self.check_runtime_error(error, ntstatus.NT_STATUS_OBJECT_NAME_NOT_FOUND):
+                raise self.LocalRuntimeError(self, error,
+                                             "QueryTrustedDomainInfoByName(%s, FULL_INFO) failed" % (
+                                                 lsaString.string))
+
+        if remote_trust_info:
+            try:
+                lsaString.string = remote_trust_info.domain_name.string
+                remote_lsa.QueryTrustedDomainInfoByName(remote_policy,
+                                                        lsaString,
+                                                        lsa.LSA_TRUSTED_DOMAIN_INFO_FULL_INFO)
+                raise CommandError("TrustedDomain %s already exists" % lsaString.string)
+            except NTSTATUSError as error:
+                if not self.check_runtime_error(error, ntstatus.NT_STATUS_OBJECT_NAME_NOT_FOUND):
+                    raise self.RemoteRuntimeError(self, error,
+                                                  "QueryTrustedDomainInfoByName(%s, FULL_INFO) failed" % (
+                                                      lsaString.string))
+
+            try:
+                lsaString.string = remote_trust_info.netbios_name.string
+                remote_lsa.QueryTrustedDomainInfoByName(remote_policy,
+                                                        lsaString,
+                                                        lsa.LSA_TRUSTED_DOMAIN_INFO_FULL_INFO)
+                raise CommandError("TrustedDomain %s already exists" % lsaString.string)
+            except NTSTATUSError as error:
+                if not self.check_runtime_error(error, ntstatus.NT_STATUS_OBJECT_NAME_NOT_FOUND):
+                    raise self.RemoteRuntimeError(self, error,
+                                                  "QueryTrustedDomainInfoByName(%s, FULL_INFO) failed" % (
+                                                      lsaString.string))
+
+        try:
+            local_netlogon = self.new_local_netlogon_connection()
+        except RuntimeError as error:
+            raise self.LocalRuntimeError(self, error, "failed to connect netlogon server")
+
+        try:
+            local_netlogon_info = self.get_netlogon_dc_info(local_netlogon, local_server)
+        except RuntimeError as error:
+            raise self.LocalRuntimeError(self, error, "failed to get netlogon dc info")
+
+        if remote_trust_info:
+            try:
+                remote_netlogon = self.new_remote_netlogon_connection()
+            except RuntimeError as error:
+                raise self.RemoteRuntimeError(self, error, "failed to connect netlogon server")
+
+            try:
+                remote_netlogon_dc_unc = self.get_netlogon_dc_unc(remote_netlogon,
+                                                                  remote_server, domain)
+            except RuntimeError as error:
+                raise self.RemoteRuntimeError(self, error, "failed to get netlogon dc info")
+
+        def generate_AuthInOutBlob(secret, update_time):
+            if secret is None:
+                blob =
drsblobs.trustAuthInOutBlob() + blob.count = 0 + + return blob + + clear = drsblobs.AuthInfoClear() + clear.size = len(secret) + clear.password = secret + + info = drsblobs.AuthenticationInformation() + info.LastUpdateTime = samba.unix2nttime(update_time) + info.AuthType = lsa.TRUST_AUTH_TYPE_CLEAR + info.AuthInfo = clear + + array = drsblobs.AuthenticationInformationArray() + array.count = 1 + array.array = [info] + + blob = drsblobs.trustAuthInOutBlob() + blob.count = 1 + blob.current = array + + return blob + + update_time = samba.current_unix_time() + incoming_blob = generate_AuthInOutBlob(incoming_secret, update_time) + outgoing_blob = generate_AuthInOutBlob(outgoing_secret, update_time) + + local_tdo_handle = None + remote_tdo_handle = None + + try: + if remote_trust_info: + self.outf.write("Creating remote TDO.\n") + current_request = {"location": "remote", "name": "CreateTrustedDomainEx2"} + remote_tdo_handle = CreateTrustedDomainRelax(remote_lsa, + remote_policy, + remote_trust_info, + lsa.LSA_TRUSTED_DOMAIN_ALL_ACCESS, + outgoing_blob, + incoming_blob) + self.outf.write("Remote TDO created.\n") + if enc_types: + self.outf.write("Setting supported encryption types on remote TDO.\n") + current_request = {"location": "remote", "name": "SetInformationTrustedDomain"} + remote_lsa.SetInformationTrustedDomain(remote_tdo_handle, + lsa.LSA_TRUSTED_DOMAIN_SUPPORTED_ENCRYPTION_TYPES, + enc_types) + + self.outf.write("Creating local TDO.\n") + current_request = {"location": "local", "name": "CreateTrustedDomainEx2"} + local_tdo_handle = CreateTrustedDomainRelax(local_lsa, + local_policy, + local_trust_info, + lsa.LSA_TRUSTED_DOMAIN_ALL_ACCESS, + incoming_blob, + outgoing_blob) + self.outf.write("Local TDO created\n") + if enc_types: + self.outf.write("Setting supported encryption types on local TDO.\n") + current_request = {"location": "local", "name": "SetInformationTrustedDomain"} + local_lsa.SetInformationTrustedDomain(local_tdo_handle, + lsa.LSA_TRUSTED_DOMAIN_SUPPORTED_ENCRYPTION_TYPES, + enc_types) + except RuntimeError as error: + self.outf.write("Error: %s failed %sly - cleaning up\n" % ( + current_request['name'], current_request['location'])) + if remote_tdo_handle: + self.outf.write("Deleting remote TDO.\n") + remote_lsa.DeleteObject(remote_tdo_handle) + remote_tdo_handle = None + if local_tdo_handle: + self.outf.write("Deleting local TDO.\n") + local_lsa.DeleteObject(local_tdo_handle) + local_tdo_handle = None + if current_request['location'] == "remote": + raise self.RemoteRuntimeError(self, error, "%s" % ( + current_request['name'])) + raise self.LocalRuntimeError(self, error, "%s" % ( + current_request['name'])) + + if validate: + if local_trust_info.trust_attributes & lsa.LSA_TRUST_ATTRIBUTE_FOREST_TRANSITIVE: + self.outf.write("Setup local forest trust information...\n") + try: + # get all information about the remote trust + # this triggers netr_GetForestTrustInformation to the remote domain + # and lsaRSetForestTrustInformation() locally, but new top level + # names are disabled by default. 
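+                    # (Per MS-NRPC, the DS_GFTI_UPDATE_TDO flag additionally
+                    # asks the local DC to store the retrieved records on the
+                    # local TDO, so msDS-TrustForestTrustInfo is populated as
+                    # a side effect of this call.)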
+ local_forest_info = \ + local_netlogon.netr_DsRGetForestTrustInformation(local_netlogon_info.dc_unc, + remote_lsa_info.dns_domain.string, + netlogon.DS_GFTI_UPDATE_TDO) + except RuntimeError as error: + raise self.LocalRuntimeError(self, error, "netr_DsRGetForestTrustInformation() failed") + + try: + # here we try to enable all top level names + local_forest_collision = \ + local_lsa.lsaRSetForestTrustInformation(local_policy, + remote_lsa_info.dns_domain, + lsa.LSA_FOREST_TRUST_DOMAIN_INFO, + local_forest_info, + 0) + except RuntimeError as error: + raise self.LocalRuntimeError(self, error, "lsaRSetForestTrustInformation() failed") + + self.write_forest_trust_info(local_forest_info, + tln=remote_lsa_info.dns_domain.string, + collisions=local_forest_collision) + + if remote_trust_info: + self.outf.write("Setup remote forest trust information...\n") + try: + # get all information about the local trust (from the perspective of the remote domain) + # this triggers netr_GetForestTrustInformation to our domain. + # and lsaRSetForestTrustInformation() remotely, but new top level + # names are disabled by default. + remote_forest_info = \ + remote_netlogon.netr_DsRGetForestTrustInformation(remote_netlogon_dc_unc, + local_lsa_info.dns_domain.string, + netlogon.DS_GFTI_UPDATE_TDO) + except RuntimeError as error: + raise self.RemoteRuntimeError(self, error, "netr_DsRGetForestTrustInformation() failed") + + try: + # here we try to enable all top level names + remote_forest_collision = \ + remote_lsa.lsaRSetForestTrustInformation(remote_policy, + local_lsa_info.dns_domain, + lsa.LSA_FOREST_TRUST_DOMAIN_INFO, + remote_forest_info, + 0) + except RuntimeError as error: + raise self.RemoteRuntimeError(self, error, "lsaRSetForestTrustInformation() failed") + + self.write_forest_trust_info(remote_forest_info, + tln=local_lsa_info.dns_domain.string, + collisions=remote_forest_collision) + + if local_trust_info.trust_direction & lsa.LSA_TRUST_DIRECTION_OUTBOUND: + self.outf.write("Validating outgoing trust...\n") + try: + local_trust_verify = local_netlogon.netr_LogonControl2Ex(local_netlogon_info.dc_unc, + netlogon.NETLOGON_CONTROL_TC_VERIFY, + 2, + remote_lsa_info.dns_domain.string) + except RuntimeError as error: + raise self.LocalRuntimeError(self, error, "NETLOGON_CONTROL_TC_VERIFY failed") + + local_trust_status = self._uint32(local_trust_verify.pdc_connection_status[0]) + local_conn_status = self._uint32(local_trust_verify.tc_connection_status[0]) + + if local_trust_verify.flags & netlogon.NETLOGON_VERIFY_STATUS_RETURNED: + local_validation = "LocalValidation: DC[%s] CONNECTION[%s] TRUST[%s] VERIFY_STATUS_RETURNED" % ( + local_trust_verify.trusted_dc_name, + local_trust_verify.tc_connection_status[1], + local_trust_verify.pdc_connection_status[1]) + else: + local_validation = "LocalValidation: DC[%s] CONNECTION[%s] TRUST[%s]" % ( + local_trust_verify.trusted_dc_name, + local_trust_verify.tc_connection_status[1], + local_trust_verify.pdc_connection_status[1]) + + if local_trust_status != werror.WERR_SUCCESS or local_conn_status != werror.WERR_SUCCESS: + raise CommandError(local_validation) + else: + self.outf.write("OK: %s\n" % local_validation) + + if remote_trust_info: + if remote_trust_info.trust_direction & lsa.LSA_TRUST_DIRECTION_OUTBOUND: + self.outf.write("Validating incoming trust...\n") + try: + remote_trust_verify = \ + remote_netlogon.netr_LogonControl2Ex(remote_netlogon_dc_unc, + netlogon.NETLOGON_CONTROL_TC_VERIFY, + 2, + local_lsa_info.dns_domain.string) + except RuntimeError as error: + 
raise self.RemoteRuntimeError(self, error, "NETLOGON_CONTROL_TC_VERIFY failed") + + remote_trust_status = self._uint32(remote_trust_verify.pdc_connection_status[0]) + remote_conn_status = self._uint32(remote_trust_verify.tc_connection_status[0]) + + if remote_trust_verify.flags & netlogon.NETLOGON_VERIFY_STATUS_RETURNED: + remote_validation = "RemoteValidation: DC[%s] CONNECTION[%s] TRUST[%s] VERIFY_STATUS_RETURNED" % ( + remote_trust_verify.trusted_dc_name, + remote_trust_verify.tc_connection_status[1], + remote_trust_verify.pdc_connection_status[1]) + else: + remote_validation = "RemoteValidation: DC[%s] CONNECTION[%s] TRUST[%s]" % ( + remote_trust_verify.trusted_dc_name, + remote_trust_verify.tc_connection_status[1], + remote_trust_verify.pdc_connection_status[1]) + + if remote_trust_status != werror.WERR_SUCCESS or remote_conn_status != werror.WERR_SUCCESS: + raise CommandError(remote_validation) + else: + self.outf.write("OK: %s\n" % remote_validation) + + if remote_tdo_handle is not None: + try: + remote_lsa.Close(remote_tdo_handle) + except RuntimeError: + pass + remote_tdo_handle = None + if local_tdo_handle is not None: + try: + local_lsa.Close(local_tdo_handle) + except RuntimeError: + pass + local_tdo_handle = None + + self.outf.write("Success.\n") + return + + +class cmd_domain_trust_delete(DomainTrustCommand): + """Delete a domain trust.""" + + synopsis = "%prog DOMAIN [options]" + + takes_optiongroups = { + "sambaopts": options.SambaOptions, + "versionopts": options.VersionOptions, + "credopts": options.CredentialsOptions, + "localdcopts": LocalDCCredentialsOptions, + } + + takes_options = [ + Option("--delete-location", type="choice", metavar="LOCATION", + choices=["local", "both"], + help="Where to delete the trusted domain object: 'local' or 'both'.", + dest='delete_location', + default="both"), + ] + + takes_args = ["domain"] + + def run(self, domain, sambaopts=None, localdcopts=None, credopts=None, versionopts=None, + delete_location=None): + + local_policy_access = lsa.LSA_POLICY_VIEW_LOCAL_INFORMATION + local_policy_access |= lsa.LSA_POLICY_TRUST_ADMIN + local_policy_access |= lsa.LSA_POLICY_CREATE_SECRET + + if delete_location == "local": + remote_policy_access = None + else: + remote_policy_access = lsa.LSA_POLICY_VIEW_LOCAL_INFORMATION + remote_policy_access |= lsa.LSA_POLICY_TRUST_ADMIN + remote_policy_access |= lsa.LSA_POLICY_CREATE_SECRET + + self.setup_local_server(sambaopts, localdcopts) + try: + local_lsa = self.new_local_lsa_connection() + except RuntimeError as error: + raise self.LocalRuntimeError(self, error, "failed to connect lsa server") + + try: + (local_policy, local_lsa_info) = self.get_lsa_info(local_lsa, local_policy_access) + except RuntimeError as error: + raise self.LocalRuntimeError(self, error, "failed to query LSA_POLICY_INFO_DNS") + + self.outf.write("LocalDomain Netbios[%s] DNS[%s] SID[%s]\n" % ( + local_lsa_info.name.string, + local_lsa_info.dns_domain.string, + local_lsa_info.sid)) + + local_tdo_info = None + local_tdo_handle = None + remote_tdo_info = None + remote_tdo_handle = None + + lsaString = lsa.String() + try: + lsaString.string = domain + local_tdo_info = local_lsa.QueryTrustedDomainInfoByName(local_policy, + lsaString, lsa.LSA_TRUSTED_DOMAIN_INFO_INFO_EX) + except NTSTATUSError as error: + if self.check_runtime_error(error, ntstatus.NT_STATUS_OBJECT_NAME_NOT_FOUND): + raise CommandError("Failed to find trust for domain '%s'" % domain) + raise self.RemoteRuntimeError(self, error, "failed to locate remote server") + + if 
remote_policy_access is not None: + try: + self.setup_remote_server(credopts, domain) + except RuntimeError as error: + raise self.RemoteRuntimeError(self, error, "failed to locate remote server") + + try: + remote_lsa = self.new_remote_lsa_connection() + except RuntimeError as error: + raise self.RemoteRuntimeError(self, error, "failed to connect lsa server") + + try: + (remote_policy, remote_lsa_info) = self.get_lsa_info(remote_lsa, remote_policy_access) + except RuntimeError as error: + raise self.RemoteRuntimeError(self, error, "failed to query LSA_POLICY_INFO_DNS") + + self.outf.write("RemoteDomain Netbios[%s] DNS[%s] SID[%s]\n" % ( + remote_lsa_info.name.string, + remote_lsa_info.dns_domain.string, + remote_lsa_info.sid)) + + if remote_lsa_info.sid != local_tdo_info.sid or \ + remote_lsa_info.name.string != local_tdo_info.netbios_name.string or \ + remote_lsa_info.dns_domain.string != local_tdo_info.domain_name.string: + raise CommandError("LocalTDO inconsistent: Netbios[%s] DNS[%s] SID[%s]" % ( + local_tdo_info.netbios_name.string, + local_tdo_info.domain_name.string, + local_tdo_info.sid)) + + try: + lsaString.string = local_lsa_info.dns_domain.string + remote_tdo_info = \ + remote_lsa.QueryTrustedDomainInfoByName(remote_policy, + lsaString, + lsa.LSA_TRUSTED_DOMAIN_INFO_INFO_EX) + except NTSTATUSError as error: + if not self.check_runtime_error(error, ntstatus.NT_STATUS_OBJECT_NAME_NOT_FOUND): + raise self.RemoteRuntimeError(self, error, "QueryTrustedDomainInfoByName(%s)" % ( + lsaString.string)) + + if remote_tdo_info is not None: + if local_lsa_info.sid != remote_tdo_info.sid or \ + local_lsa_info.name.string != remote_tdo_info.netbios_name.string or \ + local_lsa_info.dns_domain.string != remote_tdo_info.domain_name.string: + raise CommandError("RemoteTDO inconsistent: Netbios[%s] DNS[%s] SID[%s]" % ( + remote_tdo_info.netbios_name.string, + remote_tdo_info.domain_name.string, + remote_tdo_info.sid)) + + if local_tdo_info is not None: + try: + lsaString.string = local_tdo_info.domain_name.string + local_tdo_handle = \ + local_lsa.OpenTrustedDomainByName(local_policy, + lsaString, + security.SEC_STD_DELETE) + except RuntimeError as error: + raise self.LocalRuntimeError(self, error, "OpenTrustedDomainByName(%s)" % ( + lsaString.string)) + + local_lsa.DeleteObject(local_tdo_handle) + local_tdo_handle = None + + if remote_tdo_info is not None: + try: + lsaString.string = remote_tdo_info.domain_name.string + remote_tdo_handle = \ + remote_lsa.OpenTrustedDomainByName(remote_policy, + lsaString, + security.SEC_STD_DELETE) + except RuntimeError as error: + raise self.RemoteRuntimeError(self, error, "OpenTrustedDomainByName(%s)" % ( + lsaString.string)) + + if remote_tdo_handle is not None: + try: + remote_lsa.DeleteObject(remote_tdo_handle) + remote_tdo_handle = None + self.outf.write("RemoteTDO deleted.\n") + except RuntimeError as error: + self.outf.write("%s\n" % self.RemoteRuntimeError(self, error, "DeleteObject() failed")) + + return + + +class cmd_domain_trust_validate(DomainTrustCommand): + """Validate a domain trust.""" + + synopsis = "%prog DOMAIN [options]" + + takes_optiongroups = { + "sambaopts": options.SambaOptions, + "versionopts": options.VersionOptions, + "credopts": options.CredentialsOptions, + "localdcopts": LocalDCCredentialsOptions, + } + + takes_options = [ + Option("--validate-location", type="choice", metavar="LOCATION", + choices=["local", "both"], + help="Where to validate the trusted domain object: 'local' or 'both'.", + dest='validate_location', + 
default="both"), + ] + + takes_args = ["domain"] + + def run(self, domain, sambaopts=None, versionopts=None, credopts=None, localdcopts=None, + validate_location=None): + + local_policy_access = lsa.LSA_POLICY_VIEW_LOCAL_INFORMATION + + local_server = self.setup_local_server(sambaopts, localdcopts) + try: + local_lsa = self.new_local_lsa_connection() + except RuntimeError as error: + raise self.LocalRuntimeError(self, error, "failed to connect lsa server") + + try: + (local_policy, local_lsa_info) = self.get_lsa_info(local_lsa, local_policy_access) + except RuntimeError as error: + raise self.LocalRuntimeError(self, error, "failed to query LSA_POLICY_INFO_DNS") + + self.outf.write("LocalDomain Netbios[%s] DNS[%s] SID[%s]\n" % ( + local_lsa_info.name.string, + local_lsa_info.dns_domain.string, + local_lsa_info.sid)) + + try: + lsaString = lsa.String() + lsaString.string = domain + local_tdo_info = \ + local_lsa.QueryTrustedDomainInfoByName(local_policy, + lsaString, + lsa.LSA_TRUSTED_DOMAIN_INFO_INFO_EX) + except NTSTATUSError as error: + if self.check_runtime_error(error, ntstatus.NT_STATUS_OBJECT_NAME_NOT_FOUND): + raise CommandError("trusted domain object does not exist for domain [%s]" % domain) + + raise self.LocalRuntimeError(self, error, "QueryTrustedDomainInfoByName(INFO_EX) failed") + + self.outf.write("LocalTDO Netbios[%s] DNS[%s] SID[%s]\n" % ( + local_tdo_info.netbios_name.string, + local_tdo_info.domain_name.string, + local_tdo_info.sid)) + + try: + local_netlogon = self.new_local_netlogon_connection() + except RuntimeError as error: + raise self.LocalRuntimeError(self, error, "failed to connect netlogon server") + + try: + local_trust_verify = \ + local_netlogon.netr_LogonControl2Ex(local_server, + netlogon.NETLOGON_CONTROL_TC_VERIFY, + 2, + local_tdo_info.domain_name.string) + except RuntimeError as error: + raise self.LocalRuntimeError(self, error, "NETLOGON_CONTROL_TC_VERIFY failed") + + local_trust_status = self._uint32(local_trust_verify.pdc_connection_status[0]) + local_conn_status = self._uint32(local_trust_verify.tc_connection_status[0]) + + if local_trust_verify.flags & netlogon.NETLOGON_VERIFY_STATUS_RETURNED: + local_validation = "LocalValidation: DC[%s] CONNECTION[%s] TRUST[%s] VERIFY_STATUS_RETURNED" % ( + local_trust_verify.trusted_dc_name, + local_trust_verify.tc_connection_status[1], + local_trust_verify.pdc_connection_status[1]) + else: + local_validation = "LocalValidation: DC[%s] CONNECTION[%s] TRUST[%s]" % ( + local_trust_verify.trusted_dc_name, + local_trust_verify.tc_connection_status[1], + local_trust_verify.pdc_connection_status[1]) + + if local_trust_status != werror.WERR_SUCCESS or local_conn_status != werror.WERR_SUCCESS: + raise CommandError(local_validation) + else: + self.outf.write("OK: %s\n" % local_validation) + + try: + server = local_trust_verify.trusted_dc_name.replace('\\', '') + domain_and_server = "%s\\%s" % (local_tdo_info.domain_name.string, server) + local_trust_rediscover = \ + local_netlogon.netr_LogonControl2Ex(local_server, + netlogon.NETLOGON_CONTROL_REDISCOVER, + 2, + domain_and_server) + except RuntimeError as error: + raise self.LocalRuntimeError(self, error, "NETLOGON_CONTROL_REDISCOVER failed") + + local_conn_status = self._uint32(local_trust_rediscover.tc_connection_status[0]) + local_rediscover = "LocalRediscover: DC[%s] CONNECTION[%s]" % ( + local_trust_rediscover.trusted_dc_name, + local_trust_rediscover.tc_connection_status[1]) + + if local_conn_status != werror.WERR_SUCCESS: + raise CommandError(local_rediscover) + 
else: + self.outf.write("OK: %s\n" % local_rediscover) + + if validate_location != "local": + try: + remote_server = self.setup_remote_server(credopts, domain, require_pdc=False) + except RuntimeError as error: + raise self.RemoteRuntimeError(self, error, "failed to locate remote server") + + try: + remote_netlogon = self.new_remote_netlogon_connection() + except RuntimeError as error: + raise self.RemoteRuntimeError(self, error, "failed to connect netlogon server") + + try: + remote_trust_verify = \ + remote_netlogon.netr_LogonControl2Ex(remote_server, + netlogon.NETLOGON_CONTROL_TC_VERIFY, + 2, + local_lsa_info.dns_domain.string) + except RuntimeError as error: + raise self.RemoteRuntimeError(self, error, "NETLOGON_CONTROL_TC_VERIFY failed") + + remote_trust_status = self._uint32(remote_trust_verify.pdc_connection_status[0]) + remote_conn_status = self._uint32(remote_trust_verify.tc_connection_status[0]) + + if remote_trust_verify.flags & netlogon.NETLOGON_VERIFY_STATUS_RETURNED: + remote_validation = "RemoteValidation: DC[%s] CONNECTION[%s] TRUST[%s] VERIFY_STATUS_RETURNED" % ( + remote_trust_verify.trusted_dc_name, + remote_trust_verify.tc_connection_status[1], + remote_trust_verify.pdc_connection_status[1]) + else: + remote_validation = "RemoteValidation: DC[%s] CONNECTION[%s] TRUST[%s]" % ( + remote_trust_verify.trusted_dc_name, + remote_trust_verify.tc_connection_status[1], + remote_trust_verify.pdc_connection_status[1]) + + if remote_trust_status != werror.WERR_SUCCESS or remote_conn_status != werror.WERR_SUCCESS: + raise CommandError(remote_validation) + else: + self.outf.write("OK: %s\n" % remote_validation) + + try: + server = remote_trust_verify.trusted_dc_name.replace('\\', '') + domain_and_server = "%s\\%s" % (local_lsa_info.dns_domain.string, server) + remote_trust_rediscover = \ + remote_netlogon.netr_LogonControl2Ex(remote_server, + netlogon.NETLOGON_CONTROL_REDISCOVER, + 2, + domain_and_server) + except RuntimeError as error: + raise self.RemoteRuntimeError(self, error, "NETLOGON_CONTROL_REDISCOVER failed") + + remote_conn_status = self._uint32(remote_trust_rediscover.tc_connection_status[0]) + + remote_rediscover = "RemoteRediscover: DC[%s] CONNECTION[%s]" % ( + remote_trust_rediscover.trusted_dc_name, + remote_trust_rediscover.tc_connection_status[1]) + + if remote_conn_status != werror.WERR_SUCCESS: + raise CommandError(remote_rediscover) + else: + self.outf.write("OK: %s\n" % remote_rediscover) + + return + + +class cmd_domain_trust_namespaces(DomainTrustCommand): + """Manage forest trust namespaces.""" + + synopsis = "%prog [DOMAIN] [options]" + + takes_optiongroups = { + "sambaopts": options.SambaOptions, + "versionopts": options.VersionOptions, + "localdcopts": LocalDCCredentialsOptions, + } + + takes_options = [ + Option("--refresh", type="choice", metavar="check|store", + choices=["check", "store", None], + help="List and maybe store refreshed forest trust information: 'check' or 'store'.", + dest='refresh', + default=None), + Option("--enable-all", action="store_true", + help="Try to update disabled entries, not allowed with --refresh=check.", + dest='enable_all', + default=False), + Option("--enable-tln", action="append", metavar='DNSDOMAIN', + help="Enable a top level name entry. Can be specified multiple times.", + dest='enable_tln', + default=[]), + Option("--disable-tln", action="append", metavar='DNSDOMAIN', + help="Disable a top level name entry. 
Can be specified multiple times.", + dest='disable_tln', + default=[]), + Option("--add-tln-ex", action="append", metavar='DNSDOMAIN', + help="Add a top level exclusion entry. Can be specified multiple times.", + dest='add_tln_ex', + default=[]), + Option("--delete-tln-ex", action="append", metavar='DNSDOMAIN', + help="Delete a top level exclusion entry. Can be specified multiple times.", + dest='delete_tln_ex', + default=[]), + Option("--enable-nb", action="append", metavar='NETBIOSDOMAIN', + help="Enable a netbios name in a domain entry. Can be specified multiple times.", + dest='enable_nb', + default=[]), + Option("--disable-nb", action="append", metavar='NETBIOSDOMAIN', + help="Disable a netbios name in a domain entry. Can be specified multiple times.", + dest='disable_nb', + default=[]), + Option("--enable-sid", action="append", metavar='DOMAINSID', + help="Enable a SID in a domain entry. Can be specified multiple times.", + dest='enable_sid_str', + default=[]), + Option("--disable-sid", action="append", metavar='DOMAINSID', + help="Disable a SID in a domain entry. Can be specified multiple times.", + dest='disable_sid_str', + default=[]), + Option("--add-upn-suffix", action="append", metavar='DNSDOMAIN', + help="Add a new uPNSuffixes attribute for the local forest. Can be specified multiple times.", + dest='add_upn', + default=[]), + Option("--delete-upn-suffix", action="append", metavar='DNSDOMAIN', + help="Delete an existing uPNSuffixes attribute of the local forest. Can be specified multiple times.", + dest='delete_upn', + default=[]), + Option("--add-spn-suffix", action="append", metavar='DNSDOMAIN', + help="Add a new msDS-SPNSuffixes attribute for the local forest. Can be specified multiple times.", + dest='add_spn', + default=[]), + Option("--delete-spn-suffix", action="append", metavar='DNSDOMAIN', + help="Delete an existing msDS-SPNSuffixes attribute of the local forest. 
Can be specified multiple times.", + dest='delete_spn', + default=[]), + ] + + takes_args = ["domain?"] + + def run(self, domain=None, sambaopts=None, localdcopts=None, versionopts=None, + refresh=None, enable_all=False, + enable_tln=None, disable_tln=None, add_tln_ex=None, delete_tln_ex=None, + enable_sid_str=None, disable_sid_str=None, enable_nb=None, disable_nb=None, + add_upn=None, delete_upn=None, add_spn=None, delete_spn=None): + + if enable_tln is None: + enable_tln = [] + if disable_tln is None: + disable_tln = [] + if add_tln_ex is None: + add_tln_ex = [] + if delete_tln_ex is None: + delete_tln_ex = [] + if enable_sid_str is None: + enable_sid_str = [] + if disable_sid_str is None: + disable_sid_str = [] + if enable_nb is None: + enable_nb = [] + if disable_nb is None: + disable_nb = [] + if add_upn is None: + add_upn = [] + if delete_upn is None: + delete_upn = [] + if add_spn is None: + add_spn = [] + if delete_spn is None: + delete_spn = [] + + require_update = False + + if domain is None: + if refresh == "store": + raise CommandError("--refresh=%s not allowed without DOMAIN" % refresh) + + if enable_all: + raise CommandError("--enable-all not allowed without DOMAIN") + + if len(enable_tln) > 0: + raise CommandError("--enable-tln not allowed without DOMAIN") + if len(disable_tln) > 0: + raise CommandError("--disable-tln not allowed without DOMAIN") + + if len(add_tln_ex) > 0: + raise CommandError("--add-tln-ex not allowed without DOMAIN") + if len(delete_tln_ex) > 0: + raise CommandError("--delete-tln-ex not allowed without DOMAIN") + + if len(enable_nb) > 0: + raise CommandError("--enable-nb not allowed without DOMAIN") + if len(disable_nb) > 0: + raise CommandError("--disable-nb not allowed without DOMAIN") + + if len(enable_sid_str) > 0: + raise CommandError("--enable-sid not allowed without DOMAIN") + if len(disable_sid_str) > 0: + raise CommandError("--disable-sid not allowed without DOMAIN") + + if len(add_upn) > 0: + for n in add_upn: + if not n.startswith("*."): + continue + raise CommandError("value[%s] specified for --add-upn-suffix should not include with '*.'" % n) + require_update = True + if len(delete_upn) > 0: + for n in delete_upn: + if not n.startswith("*."): + continue + raise CommandError("value[%s] specified for --delete-upn-suffix should not include with '*.'" % n) + require_update = True + for a in add_upn: + for d in delete_upn: + if a.lower() != d.lower(): + continue + raise CommandError("value[%s] specified for --add-upn-suffix and --delete-upn-suffix" % a) + + if len(add_spn) > 0: + for n in add_spn: + if not n.startswith("*."): + continue + raise CommandError("value[%s] specified for --add-spn-suffix should not include with '*.'" % n) + require_update = True + if len(delete_spn) > 0: + for n in delete_spn: + if not n.startswith("*."): + continue + raise CommandError("value[%s] specified for --delete-spn-suffix should not include with '*.'" % n) + require_update = True + for a in add_spn: + for d in delete_spn: + if a.lower() != d.lower(): + continue + raise CommandError("value[%s] specified for --add-spn-suffix and --delete-spn-suffix" % a) + else: + if len(add_upn) > 0: + raise CommandError("--add-upn-suffix not allowed together with DOMAIN") + if len(delete_upn) > 0: + raise CommandError("--delete-upn-suffix not allowed together with DOMAIN") + if len(add_spn) > 0: + raise CommandError("--add-spn-suffix not allowed together with DOMAIN") + if len(delete_spn) > 0: + raise CommandError("--delete-spn-suffix not allowed together with DOMAIN") + + if 
refresh is not None: + if refresh == "store": + require_update = True + + if enable_all and refresh != "store": + raise CommandError("--enable-all not allowed together with --refresh=%s" % refresh) + + if len(enable_tln) > 0: + raise CommandError("--enable-tln not allowed together with --refresh") + if len(disable_tln) > 0: + raise CommandError("--disable-tln not allowed together with --refresh") + + if len(add_tln_ex) > 0: + raise CommandError("--add-tln-ex not allowed together with --refresh") + if len(delete_tln_ex) > 0: + raise CommandError("--delete-tln-ex not allowed together with --refresh") + + if len(enable_nb) > 0: + raise CommandError("--enable-nb not allowed together with --refresh") + if len(disable_nb) > 0: + raise CommandError("--disable-nb not allowed together with --refresh") + + if len(enable_sid_str) > 0: + raise CommandError("--enable-sid not allowed together with --refresh") + if len(disable_sid_str) > 0: + raise CommandError("--disable-sid not allowed together with --refresh") + else: + if enable_all: + require_update = True + + if len(enable_tln) > 0: + raise CommandError("--enable-tln not allowed together with --enable-all") + + if len(enable_nb) > 0: + raise CommandError("--enable-nb not allowed together with --enable-all") + + if len(enable_sid_str) > 0: + raise CommandError("--enable-sid not allowed together with --enable-all") + + if len(enable_tln) > 0: + require_update = True + if len(disable_tln) > 0: + require_update = True + for e in enable_tln: + for d in disable_tln: + if e.lower() != d.lower(): + continue + raise CommandError("value[%s] specified for --enable-tln and --disable-tln" % e) + + if len(add_tln_ex) > 0: + for n in add_tln_ex: + if not n.startswith("*."): + continue + raise CommandError("value[%s] specified for --add-tln-ex should not include with '*.'" % n) + require_update = True + if len(delete_tln_ex) > 0: + for n in delete_tln_ex: + if not n.startswith("*."): + continue + raise CommandError("value[%s] specified for --delete-tln-ex should not include with '*.'" % n) + require_update = True + for a in add_tln_ex: + for d in delete_tln_ex: + if a.lower() != d.lower(): + continue + raise CommandError("value[%s] specified for --add-tln-ex and --delete-tln-ex" % a) + + if len(enable_nb) > 0: + require_update = True + if len(disable_nb) > 0: + require_update = True + for e in enable_nb: + for d in disable_nb: + if e.upper() != d.upper(): + continue + raise CommandError("value[%s] specified for --enable-nb and --disable-nb" % e) + + enable_sid = [] + for s in enable_sid_str: + try: + sid = security.dom_sid(s) + except (ValueError, TypeError): + raise CommandError("value[%s] specified for --enable-sid is not a valid SID" % s) + enable_sid.append(sid) + disable_sid = [] + for s in disable_sid_str: + try: + sid = security.dom_sid(s) + except (ValueError, TypeError): + raise CommandError("value[%s] specified for --disable-sid is not a valid SID" % s) + disable_sid.append(sid) + if len(enable_sid) > 0: + require_update = True + if len(disable_sid) > 0: + require_update = True + for e in enable_sid: + for d in disable_sid: + if e != d: + continue + raise CommandError("value[%s] specified for --enable-sid and --disable-sid" % e) + + local_policy_access = lsa.LSA_POLICY_VIEW_LOCAL_INFORMATION + if require_update: + local_policy_access |= lsa.LSA_POLICY_TRUST_ADMIN + + local_server = self.setup_local_server(sambaopts, localdcopts) + try: + local_lsa = self.new_local_lsa_connection() + except RuntimeError as error: + raise self.LocalRuntimeError(self, 
error, "failed to connect lsa server") + + try: + (local_policy, local_lsa_info) = self.get_lsa_info(local_lsa, local_policy_access) + except RuntimeError as error: + raise self.LocalRuntimeError(self, error, "failed to query LSA_POLICY_INFO_DNS") + + self.outf.write("LocalDomain Netbios[%s] DNS[%s] SID[%s]\n" % ( + local_lsa_info.name.string, + local_lsa_info.dns_domain.string, + local_lsa_info.sid)) + + if domain is None: + try: + local_netlogon = self.new_local_netlogon_connection() + except RuntimeError as error: + raise self.LocalRuntimeError(self, error, "failed to connect netlogon server") + + try: + local_netlogon_info = self.get_netlogon_dc_info(local_netlogon, local_server) + except RuntimeError as error: + raise self.LocalRuntimeError(self, error, "failed to get netlogon dc info") + + if local_netlogon_info.domain_name != local_netlogon_info.forest_name: + raise CommandError("The local domain [%s] is not the forest root [%s]" % ( + local_netlogon_info.domain_name, + local_netlogon_info.forest_name)) + + try: + # get all information about our own forest + own_forest_info = local_netlogon.netr_DsRGetForestTrustInformation(local_netlogon_info.dc_unc, + None, 0) + except RuntimeError as error: + if self.check_runtime_error(error, werror.WERR_RPC_S_PROCNUM_OUT_OF_RANGE): + raise CommandError("LOCAL_DC[%s]: netr_DsRGetForestTrustInformation() not supported." % ( + local_server)) + + if self.check_runtime_error(error, werror.WERR_INVALID_FUNCTION): + raise CommandError("LOCAL_DC[%s]: netr_DsRGetForestTrustInformation() not supported." % ( + local_server)) + + if self.check_runtime_error(error, werror.WERR_NERR_ACFNOTLOADED): + raise CommandError("LOCAL_DC[%s]: netr_DsRGetForestTrustInformation() not supported." % ( + local_server)) + + raise self.LocalRuntimeError(self, error, "netr_DsRGetForestTrustInformation() failed") + + self.outf.write("Own forest trust information...\n") + self.write_forest_trust_info(own_forest_info, + tln=local_lsa_info.dns_domain.string) + + try: + local_samdb = self.new_local_ldap_connection() + except RuntimeError as error: + raise self.LocalRuntimeError(self, error, "failed to connect to SamDB") + + local_partitions_dn = "CN=Partitions,%s" % str(local_samdb.get_config_basedn()) + attrs = ['uPNSuffixes', 'msDS-SPNSuffixes'] + try: + msgs = local_samdb.search(base=local_partitions_dn, + scope=ldb.SCOPE_BASE, + expression="(objectClass=crossRefContainer)", + attrs=attrs) + stored_msg = msgs[0] + except ldb.LdbError as error: + raise self.LocalLdbError(self, error, "failed to search partition dn") + + stored_upn_vals = [] + if 'uPNSuffixes' in stored_msg: + stored_upn_vals.extend(stored_msg['uPNSuffixes']) + + stored_spn_vals = [] + if 'msDS-SPNSuffixes' in stored_msg: + stored_spn_vals.extend(stored_msg['msDS-SPNSuffixes']) + + self.outf.write("Stored uPNSuffixes attributes[%d]:\n" % len(stored_upn_vals)) + for v in stored_upn_vals: + self.outf.write("TLN: %-32s DNS[*.%s]\n" % ("", v)) + self.outf.write("Stored msDS-SPNSuffixes attributes[%d]:\n" % len(stored_spn_vals)) + for v in stored_spn_vals: + self.outf.write("TLN: %-32s DNS[*.%s]\n" % ("", v)) + + if not require_update: + return + + replace_upn = False + update_upn_vals = [] + update_upn_vals.extend(stored_upn_vals) + + replace_spn = False + update_spn_vals = [] + update_spn_vals.extend(stored_spn_vals) + + for upn in add_upn: + for v in update_upn_vals: + if str(v).lower() == upn.lower(): + raise CommandError("Entry already present for " + "value[%s] specified for " + "--add-upn-suffix" % upn) + 
update_upn_vals.append(upn) + replace_upn = True + + for upn in delete_upn: + idx = None + for i, v in enumerate(update_upn_vals): + if str(v).lower() != upn.lower(): + continue + idx = i + break + if idx is None: + raise CommandError("Entry not found for value[%s] specified for --delete-upn-suffix" % upn) + + update_upn_vals.pop(idx) + replace_upn = True + + for spn in add_spn: + for v in update_spn_vals: + if str(v).lower() == spn.lower(): + raise CommandError("Entry already present for " + "value[%s] specified for " + "--add-spn-suffix" % spn) + update_spn_vals.append(spn) + replace_spn = True + + for spn in delete_spn: + idx = None + for i, v in enumerate(update_spn_vals): + if str(v).lower() != spn.lower(): + continue + idx = i + break + if idx is None: + raise CommandError("Entry not found for value[%s] specified for --delete-spn-suffix" % spn) + + update_spn_vals.pop(idx) + replace_spn = True + + self.outf.write("Update uPNSuffixes attributes[%d]:\n" % len(update_upn_vals)) + for v in update_upn_vals: + self.outf.write("TLN: %-32s DNS[*.%s]\n" % ("", v)) + self.outf.write("Update msDS-SPNSuffixes attributes[%d]:\n" % len(update_spn_vals)) + for v in update_spn_vals: + self.outf.write("TLN: %-32s DNS[*.%s]\n" % ("", v)) + + update_msg = ldb.Message() + update_msg.dn = stored_msg.dn + + if replace_upn: + update_msg['uPNSuffixes'] = ldb.MessageElement(update_upn_vals, + ldb.FLAG_MOD_REPLACE, + 'uPNSuffixes') + if replace_spn: + update_msg['msDS-SPNSuffixes'] = ldb.MessageElement(update_spn_vals, + ldb.FLAG_MOD_REPLACE, + 'msDS-SPNSuffixes') + try: + local_samdb.modify(update_msg) + except ldb.LdbError as error: + raise self.LocalLdbError(self, error, "failed to update partition dn") + + try: + stored_forest_info = local_netlogon.netr_DsRGetForestTrustInformation(local_netlogon_info.dc_unc, + None, 0) + except RuntimeError as error: + raise self.LocalRuntimeError(self, error, "netr_DsRGetForestTrustInformation() failed") + + self.outf.write("Stored forest trust information...\n") + self.write_forest_trust_info(stored_forest_info, + tln=local_lsa_info.dns_domain.string) + return + + try: + lsaString = lsa.String() + lsaString.string = domain + local_tdo_info = \ + local_lsa.QueryTrustedDomainInfoByName(local_policy, + lsaString, + lsa.LSA_TRUSTED_DOMAIN_INFO_INFO_EX) + except NTSTATUSError as error: + if self.check_runtime_error(error, ntstatus.NT_STATUS_OBJECT_NAME_NOT_FOUND): + raise CommandError("trusted domain object does not exist for domain [%s]" % domain) + + raise self.LocalRuntimeError(self, error, "QueryTrustedDomainInfoByName(INFO_EX) failed") + + self.outf.write("LocalTDO Netbios[%s] DNS[%s] SID[%s]\n" % ( + local_tdo_info.netbios_name.string, + local_tdo_info.domain_name.string, + local_tdo_info.sid)) + + if not local_tdo_info.trust_attributes & lsa.LSA_TRUST_ATTRIBUTE_FOREST_TRANSITIVE: + raise CommandError("trusted domain object for domain [%s] is not marked as FOREST_TRANSITIVE." 
% domain) + + if refresh is not None: + try: + local_netlogon = self.new_local_netlogon_connection() + except RuntimeError as error: + raise self.LocalRuntimeError(self, error, "failed to connect netlogon server") + + try: + local_netlogon_info = self.get_netlogon_dc_info(local_netlogon, local_server) + except RuntimeError as error: + raise self.LocalRuntimeError(self, error, "failed to get netlogon dc info") + + lsa_update_check = 1 + if refresh == "store": + netlogon_update_tdo = netlogon.DS_GFTI_UPDATE_TDO + if enable_all: + lsa_update_check = 0 + else: + netlogon_update_tdo = 0 + + try: + # get all information about the remote trust + # this triggers netr_GetForestTrustInformation to the remote domain + # and lsaRSetForestTrustInformation() locally, but new top level + # names are disabled by default. + fresh_forest_info = \ + local_netlogon.netr_DsRGetForestTrustInformation(local_netlogon_info.dc_unc, + local_tdo_info.domain_name.string, + netlogon_update_tdo) + except RuntimeError as error: + raise self.LocalRuntimeError(self, error, "netr_DsRGetForestTrustInformation() failed") + + try: + fresh_forest_collision = \ + local_lsa.lsaRSetForestTrustInformation(local_policy, + local_tdo_info.domain_name, + lsa.LSA_FOREST_TRUST_DOMAIN_INFO, + fresh_forest_info, + lsa_update_check) + except RuntimeError as error: + raise self.LocalRuntimeError(self, error, "lsaRSetForestTrustInformation() failed") + + self.outf.write("Fresh forest trust information...\n") + self.write_forest_trust_info(fresh_forest_info, + tln=local_tdo_info.domain_name.string, + collisions=fresh_forest_collision) + + if refresh == "store": + try: + lsaString = lsa.String() + lsaString.string = local_tdo_info.domain_name.string + stored_forest_info = \ + local_lsa.lsaRQueryForestTrustInformation(local_policy, + lsaString, + lsa.LSA_FOREST_TRUST_DOMAIN_INFO) + except RuntimeError as error: + raise self.LocalRuntimeError(self, error, "lsaRQueryForestTrustInformation() failed") + + self.outf.write("Stored forest trust information...\n") + self.write_forest_trust_info(stored_forest_info, + tln=local_tdo_info.domain_name.string) + + return + + # + # The none --refresh path + # + + try: + lsaString = lsa.String() + lsaString.string = local_tdo_info.domain_name.string + local_forest_info = \ + local_lsa.lsaRQueryForestTrustInformation(local_policy, + lsaString, + lsa.LSA_FOREST_TRUST_DOMAIN_INFO) + except RuntimeError as error: + raise self.LocalRuntimeError(self, error, "lsaRQueryForestTrustInformation() failed") + + self.outf.write("Local forest trust information...\n") + self.write_forest_trust_info(local_forest_info, + tln=local_tdo_info.domain_name.string) + + if not require_update: + return + + entries = [] + entries.extend(local_forest_info.entries) + update_forest_info = lsa.ForestTrustInformation() + update_forest_info.count = len(entries) + update_forest_info.entries = entries + + if enable_all: + for r in update_forest_info.entries: + if r.type != lsa.LSA_FOREST_TRUST_TOP_LEVEL_NAME: + continue + if r.flags == 0: + continue + r.time = 0 + r.flags &= ~lsa.LSA_TLN_DISABLED_MASK + for r in update_forest_info.entries: + if r.type != lsa.LSA_FOREST_TRUST_DOMAIN_INFO: + continue + if r.flags == 0: + continue + r.time = 0 + r.flags &= ~lsa.LSA_NB_DISABLED_MASK + r.flags &= ~lsa.LSA_SID_DISABLED_MASK + + for tln in enable_tln: + idx = None + for i, r in enumerate(update_forest_info.entries): + if r.type != lsa.LSA_FOREST_TRUST_TOP_LEVEL_NAME: + continue + if r.forest_trust_data.string.lower() != tln.lower(): + continue + idx = 
i + break + if idx is None: + raise CommandError("Entry not found for value[%s] specified for --enable-tln" % tln) + if not update_forest_info.entries[idx].flags & lsa.LSA_TLN_DISABLED_MASK: + raise CommandError("Entry found for value[%s] specified for --enable-tln is already enabled" % tln) + update_forest_info.entries[idx].time = 0 + update_forest_info.entries[idx].flags &= ~lsa.LSA_TLN_DISABLED_MASK + + for tln in disable_tln: + idx = None + for i, r in enumerate(update_forest_info.entries): + if r.type != lsa.LSA_FOREST_TRUST_TOP_LEVEL_NAME: + continue + if r.forest_trust_data.string.lower() != tln.lower(): + continue + idx = i + break + if idx is None: + raise CommandError("Entry not found for value[%s] specified for --disable-tln" % tln) + if update_forest_info.entries[idx].flags & lsa.LSA_TLN_DISABLED_ADMIN: + raise CommandError("Entry found for value[%s] specified for --disable-tln is already disabled" % tln) + update_forest_info.entries[idx].time = 0 + update_forest_info.entries[idx].flags &= ~lsa.LSA_TLN_DISABLED_MASK + update_forest_info.entries[idx].flags |= lsa.LSA_TLN_DISABLED_ADMIN + + for tln_ex in add_tln_ex: + idx = None + for i, r in enumerate(update_forest_info.entries): + if r.type != lsa.LSA_FOREST_TRUST_TOP_LEVEL_NAME_EX: + continue + if r.forest_trust_data.string.lower() != tln_ex.lower(): + continue + idx = i + break + if idx is not None: + raise CommandError("Entry already present for value[%s] specified for --add-tln-ex" % tln_ex) + + tln_dot = ".%s" % tln_ex.lower() + idx = None + for i, r in enumerate(update_forest_info.entries): + if r.type != lsa.LSA_FOREST_TRUST_TOP_LEVEL_NAME: + continue + r_dot = ".%s" % r.forest_trust_data.string.lower() + if tln_dot == r_dot: + raise CommandError("TLN entry present for value[%s] specified for --add-tln-ex" % tln_ex) + if not tln_dot.endswith(r_dot): + continue + idx = i + break + + if idx is None: + raise CommandError("No TLN parent present for value[%s] specified for --add-tln-ex" % tln_ex) + + r = lsa.ForestTrustRecord() + r.type = lsa.LSA_FOREST_TRUST_TOP_LEVEL_NAME_EX + r.flags = 0 + r.time = 0 + r.forest_trust_data.string = tln_ex + + entries = [] + entries.extend(update_forest_info.entries) + entries.insert(idx + 1, r) + update_forest_info.count = len(entries) + update_forest_info.entries = entries + + for tln_ex in delete_tln_ex: + idx = None + for i, r in enumerate(update_forest_info.entries): + if r.type != lsa.LSA_FOREST_TRUST_TOP_LEVEL_NAME_EX: + continue + if r.forest_trust_data.string.lower() != tln_ex.lower(): + continue + idx = i + break + if idx is None: + raise CommandError("Entry not found for value[%s] specified for --delete-tln-ex" % tln_ex) + + entries = [] + entries.extend(update_forest_info.entries) + entries.pop(idx) + update_forest_info.count = len(entries) + update_forest_info.entries = entries + + for nb in enable_nb: + idx = None + for i, r in enumerate(update_forest_info.entries): + if r.type != lsa.LSA_FOREST_TRUST_DOMAIN_INFO: + continue + if r.forest_trust_data.netbios_domain_name.string.upper() != nb.upper(): + continue + idx = i + break + if idx is None: + raise CommandError("Entry not found for value[%s] specified for --enable-nb" % nb) + if not update_forest_info.entries[idx].flags & lsa.LSA_NB_DISABLED_MASK: + raise CommandError("Entry found for value[%s] specified for --enable-nb is already enabled" % nb) + update_forest_info.entries[idx].time = 0 + update_forest_info.entries[idx].flags &= ~lsa.LSA_NB_DISABLED_MASK + + for nb in disable_nb: + idx = None + for i, r in 
enumerate(update_forest_info.entries): + if r.type != lsa.LSA_FOREST_TRUST_DOMAIN_INFO: + continue + if r.forest_trust_data.netbios_domain_name.string.upper() != nb.upper(): + continue + idx = i + break + if idx is None: + raise CommandError("Entry not found for value[%s] specified for --disable-nb" % nb) + if update_forest_info.entries[idx].flags & lsa.LSA_NB_DISABLED_ADMIN: + raise CommandError("Entry found for value[%s] specified for --disable-nb is already disabled" % nb) + update_forest_info.entries[idx].time = 0 + update_forest_info.entries[idx].flags &= ~lsa.LSA_NB_DISABLED_MASK + update_forest_info.entries[idx].flags |= lsa.LSA_NB_DISABLED_ADMIN + + for sid in enable_sid: + idx = None + for i, r in enumerate(update_forest_info.entries): + if r.type != lsa.LSA_FOREST_TRUST_DOMAIN_INFO: + continue + if r.forest_trust_data.domain_sid != sid: + continue + idx = i + break + if idx is None: + raise CommandError("Entry not found for value[%s] specified for --enable-sid" % sid) + if not update_forest_info.entries[idx].flags & lsa.LSA_SID_DISABLED_MASK: + raise CommandError("Entry found for value[%s] specified for --enable-sid is already enabled" % sid) + update_forest_info.entries[idx].time = 0 + update_forest_info.entries[idx].flags &= ~lsa.LSA_SID_DISABLED_MASK + + for sid in disable_sid: + idx = None + for i, r in enumerate(update_forest_info.entries): + if r.type != lsa.LSA_FOREST_TRUST_DOMAIN_INFO: + continue + if r.forest_trust_data.domain_sid != sid: + continue + idx = i + break + if idx is None: + raise CommandError("Entry not found for value[%s] specified for --disable-sid" % sid) + if update_forest_info.entries[idx].flags & lsa.LSA_SID_DISABLED_ADMIN: + raise CommandError("Entry found for value[%s] specified for --disable-sid is already disabled" % sid) + update_forest_info.entries[idx].time = 0 + update_forest_info.entries[idx].flags &= ~lsa.LSA_SID_DISABLED_MASK + update_forest_info.entries[idx].flags |= lsa.LSA_SID_DISABLED_ADMIN + + try: + update_forest_collision = local_lsa.lsaRSetForestTrustInformation(local_policy, + local_tdo_info.domain_name, + lsa.LSA_FOREST_TRUST_DOMAIN_INFO, + update_forest_info, 0) + except RuntimeError as error: + raise self.LocalRuntimeError(self, error, "lsaRSetForestTrustInformation() failed") + + self.outf.write("Updated forest trust information...\n") + self.write_forest_trust_info(update_forest_info, + tln=local_tdo_info.domain_name.string, + collisions=update_forest_collision) + + try: + lsaString = lsa.String() + lsaString.string = local_tdo_info.domain_name.string + stored_forest_info = local_lsa.lsaRQueryForestTrustInformation(local_policy, + lsaString, + lsa.LSA_FOREST_TRUST_DOMAIN_INFO) + except RuntimeError as error: + raise self.LocalRuntimeError(self, error, "lsaRQueryForestTrustInformation() failed") + + self.outf.write("Stored forest trust information...\n") + self.write_forest_trust_info(stored_forest_info, + tln=local_tdo_info.domain_name.string) + return + + +class cmd_domain_trust(SuperCommand): + """Domain and forest trust management.""" + + subcommands = {} + subcommands["list"] = cmd_domain_trust_list() + subcommands["show"] = cmd_domain_trust_show() + subcommands["create"] = cmd_domain_trust_create() + subcommands["modify"] = cmd_domain_trust_modify() + subcommands["delete"] = cmd_domain_trust_delete() + subcommands["validate"] = cmd_domain_trust_validate() + subcommands["namespaces"] = cmd_domain_trust_namespaces()
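For orientation, the trust subcommands above all follow the same LSA pattern: connect to a DC, open the policy handle, then address the trusted-domain object (TDO) by name. A minimal standalone sketch of that pattern, assuming a reachable DC "dc1.example.com" and an existing trust to "otherforest.example" (server, domain and credential handling here are invented placeholders, not taken from this patch):

    # Hedged sketch only: names and credentials below are assumptions.
    from samba import credentials, param
    from samba.dcerpc import lsa, security

    lp = param.LoadParm()
    lp.load_default()
    creds = credentials.Credentials()
    creds.guess(lp)

    # Connect to the DC's LSA pipe and open the policy handle.
    conn = lsa.lsarpc("ncacn_np:dc1.example.com[sign]", lp, creds)
    objattr = lsa.ObjectAttribute()
    policy = conn.OpenPolicy2("", objattr, security.SEC_FLAG_MAXIMUM_ALLOWED)

    # Look up the TDO by its DNS name, as the commands above do.
    name = lsa.String()
    name.string = "otherforest.example"
    tdo = conn.QueryTrustedDomainInfoByName(policy, name,
                                            lsa.LSA_TRUSTED_DOMAIN_INFO_INFO_EX)
    print(tdo.netbios_name.string, tdo.domain_name.string, tdo.sid)
    conn.Close(policy)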
diff --git a/python/samba/netcmd/drs.py b/python/samba/netcmd/drs.py new file mode 100644 index 0000000..c5a9f48 --- /dev/null +++ b/python/samba/netcmd/drs.py @@ -0,0 +1,874 @@ +# implement samba_tool drs commands +# +# Copyright Andrew Tridgell 2010 +# Copyright Andrew Bartlett 2017 +# +# based on C implementation by Kamen Mazdrashki +# +# This program is free software; you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation; either version 3 of the License, or +# (at your option) any later version. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with this program. If not, see <http://www.gnu.org/licenses/>. +# + +import samba.getopt as options +import ldb +import logging +from . import common +import json + +from samba.auth import system_session +from samba.netcmd import ( + Command, + CommandError, + Option, + SuperCommand, +) +from samba.netcmd.common import attr_default +from samba.samdb import SamDB +from samba import drs_utils, nttime2string, dsdb +from samba.dcerpc import drsuapi, misc +from samba.join import join_clone +from samba import colour + +from samba.uptodateness import ( + get_partition_maps, + get_utdv_edges, + get_utdv_distances, + get_utdv_summary, + get_kcc_and_dsas, +) +from samba.common import get_string +from samba.samdb import get_default_backend_store + +def drsuapi_connect(ctx): + """make a DRSUAPI connection to the server""" + try: + (ctx.drsuapi, ctx.drsuapi_handle, ctx.bind_supported_extensions) = drs_utils.drsuapi_connect(ctx.server, ctx.lp, ctx.creds) + except Exception as e: + raise CommandError("DRS connection to %s failed" % ctx.server, e) + + +def samdb_connect(ctx): + """make an LDAP connection to the server""" + try: + ctx.samdb = SamDB(url="ldap://%s" % ctx.server, + session_info=system_session(), + credentials=ctx.creds, lp=ctx.lp) + except Exception as e: + raise CommandError("LDAP connection to %s failed" % ctx.server, e) + + +def drs_errmsg(werr): + """return "was successful" or an error string""" + (ecode, estring) = werr + if ecode == 0: + return "was successful" + return "failed, result %u (%s)" % (ecode, estring) + + +def drs_parse_ntds_dn(ntds_dn): + """parse a NTDS DN returning a site and server""" + a = ntds_dn.split(',') + if a[0] != "CN=NTDS Settings" or a[2] != "CN=Servers" or a[4] != 'CN=Sites': + raise RuntimeError("bad NTDS DN %s" % ntds_dn) + server = a[1].split('=')[1] + site = a[3].split('=')[1] + return (site, server)
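The two parsing helpers above (drs_errmsg and drs_parse_ntds_dn) are pure string manipulation, so their behaviour is easy to pin down with a small illustration (the DN and error pair below are made-up examples):

    # Illustrative values only; these exercise the two helpers above.
    ntds_dn = ("CN=NTDS Settings,CN=DC1,CN=Servers,"
               "CN=Default-First-Site-Name,CN=Sites,"
               "CN=Configuration,DC=example,DC=com")
    assert drs_parse_ntds_dn(ntds_dn) == ("Default-First-Site-Name", "DC1")

    assert drs_errmsg((0, "WERR_OK")) == "was successful"
    assert drs_errmsg((8453, "WERR_DS_DRA_ACCESS_DENIED")) == \
        "failed, result 8453 (WERR_DS_DRA_ACCESS_DENIED)"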
"local changes, and did they say they successfully " + "replicated?")), + Option("--classic", help="print local replication details", + dest='format', action='store_const', const='classic', + default=DEFAULT_SHOWREPL_FORMAT), + Option("-v", "--verbose", help="Be verbose", action="store_true"), + ] + + takes_args = ["DC?"] + + def parse_neighbour(self, n): + """Convert an ldb neighbour object into a python dictionary""" + dsa_objectguid = str(n.source_dsa_obj_guid) + d = { + 'NC dn': n.naming_context_dn, + "DSA objectGUID": dsa_objectguid, + "last attempt time": nttime2string(n.last_attempt), + "last attempt message": drs_errmsg(n.result_last_attempt), + "consecutive failures": n.consecutive_sync_failures, + "last success": nttime2string(n.last_success), + "NTDS DN": str(n.source_dsa_obj_dn), + 'is deleted': False + } + + try: + self.samdb.search(base="" % dsa_objectguid, + scope=ldb.SCOPE_BASE, + attrs=[]) + except ldb.LdbError as e: + (errno, _) = e.args + if errno == ldb.ERR_NO_SUCH_OBJECT: + d['is deleted'] = True + else: + raise + try: + (site, server) = drs_parse_ntds_dn(n.source_dsa_obj_dn) + d["DSA"] = "%s\\%s" % (site, server) + except RuntimeError: + pass + return d + + def print_neighbour(self, d): + """print one set of neighbour information""" + self.message("%s" % d['NC dn']) + if 'DSA' in d: + self.message("\t%s via RPC" % d['DSA']) + else: + self.message("\tNTDS DN: %s" % d['NTDS DN']) + self.message("\t\tDSA object GUID: %s" % d['DSA objectGUID']) + self.message("\t\tLast attempt @ %s %s" % (d['last attempt time'], + d['last attempt message'])) + self.message("\t\t%u consecutive failure(s)." % + d['consecutive failures']) + self.message("\t\tLast success @ %s" % d['last success']) + self.message("") + + def get_neighbours(self, info_type): + req1 = drsuapi.DsReplicaGetInfoRequest1() + req1.info_type = info_type + try: + (info_type, info) = self.drsuapi.DsReplicaGetInfo( + self.drsuapi_handle, 1, req1) + except Exception as e: + raise CommandError("DsReplicaGetInfo of type %u failed" % info_type, e) + + reps = [self.parse_neighbour(n) for n in info.array] + return reps + + def run(self, DC=None, sambaopts=None, + credopts=None, versionopts=None, + format=DEFAULT_SHOWREPL_FORMAT, + verbose=False): + self.lp = sambaopts.get_loadparm() + if DC is None: + DC = common.netcmd_dnsname(self.lp) + self.server = DC + self.creds = credopts.get_credentials(self.lp, fallback_machine=True) + self.verbose = verbose + + output_function = { + 'summary': self.summary_output, + 'notify_summary': self.notify_summary_output, + 'pull_summary': self.pull_summary_output, + 'json': self.json_output, + 'classic': self.classic_output, + }.get(format) + if output_function is None: + raise CommandError("unknown showrepl format %s" % format) + + return output_function() + + def json_output(self): + data = self.get_local_repl_data() + del data['site'] + del data['server'] + json.dump(data, self.outf, indent=2) + + def summary_output_handler(self, typeof_output): + """Print a short message if every seems fine, but print details of any + links that seem broken.""" + failing_repsto = [] + failing_repsfrom = [] + + local_data = self.get_local_repl_data() + + if typeof_output != "pull_summary": + for rep in local_data['repsTo']: + if rep['is deleted']: + continue + if rep["consecutive failures"] != 0 or rep["last success"] == 0: + failing_repsto.append(rep) + + if typeof_output != "notify_summary": + for rep in local_data['repsFrom']: + if rep['is deleted']: + continue + if rep["consecutive failures"] != 0 or 
rep["last success"] == 0: + failing_repsfrom.append(rep) + + if failing_repsto or failing_repsfrom: + self.message(colour.c_RED("There are failing connections")) + if failing_repsto: + self.message(colour.c_RED("Failing outbound connections:")) + for rep in failing_repsto: + self.print_neighbour(rep) + if failing_repsfrom: + self.message(colour.c_RED("Failing inbound connection:")) + for rep in failing_repsfrom: + self.print_neighbour(rep) + + return 1 + + self.message(colour.c_GREEN("[ALL GOOD]")) + + def summary_output(self): + return self.summary_output_handler("summary") + + def notify_summary_output(self): + return self.summary_output_handler("notify_summary") + + def pull_summary_output(self): + return self.summary_output_handler("pull_summary") + + def get_local_repl_data(self): + drsuapi_connect(self) + samdb_connect(self) + + # show domain information + ntds_dn = self.samdb.get_dsServiceName() + + (site, server) = drs_parse_ntds_dn(ntds_dn) + try: + ntds = self.samdb.search(base=ntds_dn, scope=ldb.SCOPE_BASE, attrs=['options', 'objectGUID', 'invocationId']) + except Exception as e: + raise CommandError("Failed to search NTDS DN %s" % ntds_dn) + + dsa_details = { + "options": int(attr_default(ntds[0], "options", 0)), + "objectGUID": get_string(self.samdb.schema_format_value( + "objectGUID", ntds[0]["objectGUID"][0])), + "invocationId": get_string(self.samdb.schema_format_value( + "objectGUID", ntds[0]["invocationId"][0])) + } + + conn = self.samdb.search(base=ntds_dn, expression="(objectClass=nTDSConnection)") + repsfrom = self.get_neighbours(drsuapi.DRSUAPI_DS_REPLICA_INFO_NEIGHBORS) + repsto = self.get_neighbours(drsuapi.DRSUAPI_DS_REPLICA_INFO_REPSTO) + + conn_details = [] + for c in conn: + c_rdn, sep, c_server_dn = str(c['fromServer'][0]).partition(',') + d = { + 'name': str(c['name']), + 'remote DN': str(c['fromServer'][0]), + 'options': int(attr_default(c, 'options', 0)), + 'enabled': (get_string(attr_default(c, 'enabledConnection', + 'TRUE')).upper() == 'TRUE') + } + + conn_details.append(d) + try: + c_server_res = self.samdb.search(base=c_server_dn, + scope=ldb.SCOPE_BASE, + attrs=["dnsHostName"]) + d['dns name'] = str(c_server_res[0]["dnsHostName"][0]) + except ldb.LdbError as e: + (errno, _) = e.args + if errno == ldb.ERR_NO_SUCH_OBJECT: + d['is deleted'] = True + except (KeyError, IndexError): + pass + + d['replicates NC'] = [] + for r in c.get('mS-DS-ReplicatesNCReason', []): + a = str(r).split(':') + d['replicates NC'].append((a[3], int(a[2]))) + + return { + 'dsa': dsa_details, + 'repsFrom': repsfrom, + 'repsTo': repsto, + 'NTDSConnections': conn_details, + 'site': site, + 'server': server + } + + def classic_output(self): + data = self.get_local_repl_data() + dsa_details = data['dsa'] + repsfrom = data['repsFrom'] + repsto = data['repsTo'] + conn_details = data['NTDSConnections'] + site = data['site'] + server = data['server'] + + self.message("%s\\%s" % (site, server)) + self.message("DSA Options: 0x%08x" % dsa_details["options"]) + self.message("DSA object GUID: %s" % dsa_details["objectGUID"]) + self.message("DSA invocationId: %s\n" % dsa_details["invocationId"]) + + self.message("==== INBOUND NEIGHBORS ====\n") + for n in repsfrom: + self.print_neighbour(n) + + self.message("==== OUTBOUND NEIGHBORS ====\n") + for n in repsto: + self.print_neighbour(n) + + reasons = ['NTDSCONN_KCC_GC_TOPOLOGY', + 'NTDSCONN_KCC_RING_TOPOLOGY', + 'NTDSCONN_KCC_MINIMIZE_HOPS_TOPOLOGY', + 'NTDSCONN_KCC_STALE_SERVERS_TOPOLOGY', + 'NTDSCONN_KCC_OSCILLATING_CONNECTION_TOPOLOGY', + 
'NTDSCONN_KCC_INTERSITE_GC_TOPOLOGY', + 'NTDSCONN_KCC_INTERSITE_TOPOLOGY', + 'NTDSCONN_KCC_SERVER_FAILOVER_TOPOLOGY', + 'NTDSCONN_KCC_SITE_FAILOVER_TOPOLOGY', + 'NTDSCONN_KCC_REDUNDANT_SERVER_TOPOLOGY'] + + self.message("==== KCC CONNECTION OBJECTS ====\n") + for d in conn_details: + self.message("Connection --") + if d.get('is deleted'): + self.message("\tWARNING: Connection to DELETED server!") + + self.message("\tConnection name: %s" % d['name']) + self.message("\tEnabled : %s" % str(d['enabled']).upper()) + self.message("\tServer DNS name : %s" % d.get('dns name')) + self.message("\tServer DN name : %s" % d['remote DN']) + self.message("\t\tTransportType: RPC") + self.message("\t\toptions: 0x%08X" % d['options']) + + if d['replicates NC']: + for nc, reason in d['replicates NC']: + self.message("\t\tReplicatesNC: %s" % nc) + self.message("\t\tReason: 0x%08x" % reason) + for s in reasons: + if getattr(dsdb, s, 0) & reason: + self.message("\t\t\t%s" % s) + else: + self.message("Warning: No NC replicated for Connection!") + + +class cmd_drs_kcc(Command): + """Trigger knowledge consistency center run.""" + + synopsis = "%prog [] [options]" + + takes_optiongroups = { + "sambaopts": options.SambaOptions, + "versionopts": options.VersionOptions, + "credopts": options.CredentialsOptions, + } + + takes_args = ["DC?"] + + def run(self, DC=None, sambaopts=None, + credopts=None, versionopts=None): + + self.lp = sambaopts.get_loadparm() + if DC is None: + DC = common.netcmd_dnsname(self.lp) + self.server = DC + + self.creds = credopts.get_credentials(self.lp, fallback_machine=True) + + drsuapi_connect(self) + + req1 = drsuapi.DsExecuteKCC1() + try: + self.drsuapi.DsExecuteKCC(self.drsuapi_handle, 1, req1) + except Exception as e: + raise CommandError("DsExecuteKCC failed", e) + self.message("Consistency check on %s successful." 
% DC) + + +class cmd_drs_replicate(Command): + """Replicate a naming context between two DCs.""" + + synopsis = "%prog [options]" + + takes_optiongroups = { + "sambaopts": options.SambaOptions, + "versionopts": options.VersionOptions, + "credopts": options.CredentialsOptions, + } + + takes_args = ["DEST_DC", "SOURCE_DC", "NC"] + + takes_options = [ + Option("--add-ref", help="use ADD_REF to add to repsTo on source", action="store_true"), + Option("--sync-forced", help="use SYNC_FORCED to force inbound replication", action="store_true"), + Option("--sync-all", help="use SYNC_ALL to replicate from all DCs", action="store_true"), + Option("--full-sync", help="resync all objects", action="store_true"), + Option("--local", help="pull changes directly into the local database (destination DC is ignored)", action="store_true"), + Option("--local-online", help="pull changes into the local database (destination DC is ignored) as a normal online replication", action="store_true"), + Option("--async-op", help="use ASYNC_OP for the replication", action="store_true"), + Option("--single-object", help="Replicate only the object specified, instead of the whole Naming Context (only with --local)", action="store_true"), + ] + + def drs_local_replicate(self, SOURCE_DC, NC, full_sync=False, + single_object=False, + sync_forced=False): + """replicate from a source DC to the local SAM""" + + self.server = SOURCE_DC + drsuapi_connect(self) + + # Override the default flag LDB_FLG_DONT_CREATE_DB + self.local_samdb = SamDB(session_info=system_session(), url=None, + credentials=self.creds, lp=self.lp, + flags=0) + + self.samdb = SamDB(url="ldap://%s" % self.server, + session_info=system_session(), + credentials=self.creds, lp=self.lp) + + # work out the source and destination GUIDs + res = self.local_samdb.search(base="", scope=ldb.SCOPE_BASE, + attrs=["dsServiceName"]) + self.ntds_dn = res[0]["dsServiceName"][0] + + res = self.local_samdb.search(base=self.ntds_dn, scope=ldb.SCOPE_BASE, + attrs=["objectGUID"]) + self.ntds_guid = misc.GUID( + self.samdb.schema_format_value("objectGUID", + res[0]["objectGUID"][0])) + + source_dsa_invocation_id = misc.GUID(self.samdb.get_invocation_id()) + dest_dsa_invocation_id = misc.GUID(self.local_samdb.get_invocation_id()) + destination_dsa_guid = self.ntds_guid + + exop = drsuapi.DRSUAPI_EXOP_NONE + + if single_object: + exop = drsuapi.DRSUAPI_EXOP_REPL_OBJ + full_sync = True + + self.samdb.transaction_start() + repl = drs_utils.drs_Replicate("ncacn_ip_tcp:%s[seal]" % self.server, + self.lp, + self.creds, self.local_samdb, + dest_dsa_invocation_id) + + # Work out if we are an RODC, so that a forced local replicate + # with the admin pw does not sync passwords + rodc = self.local_samdb.am_rodc() + try: + (num_objects, num_links) = repl.replicate(NC, + source_dsa_invocation_id, + destination_dsa_guid, + rodc=rodc, + full_sync=full_sync, + exop=exop, + sync_forced=sync_forced) + except Exception as e: + raise CommandError("Error replicating DN %s" % NC, e) + self.samdb.transaction_commit() + + if full_sync: + self.message("Full Replication of all %d objects and %d links " + "from %s to %s was successful." % + (num_objects, num_links, SOURCE_DC, + self.local_samdb.url)) + else: + self.message("Incremental replication of %d objects and %d links " + "from %s to %s was successful." 
% + (num_objects, num_links, SOURCE_DC, + self.local_samdb.url)) + + def run(self, DEST_DC, SOURCE_DC, NC, + add_ref=False, sync_forced=False, sync_all=False, full_sync=False, + local=False, local_online=False, async_op=False, single_object=False, + sambaopts=None, credopts=None, versionopts=None): + + self.server = DEST_DC + self.lp = sambaopts.get_loadparm() + + self.creds = credopts.get_credentials(self.lp, fallback_machine=True) + + if local: + self.drs_local_replicate(SOURCE_DC, NC, full_sync=full_sync, + single_object=single_object, + sync_forced=sync_forced) + return + + if local_online: + server_bind = drsuapi.drsuapi("irpc:dreplsrv", lp_ctx=self.lp) + server_bind_handle = misc.policy_handle() + else: + drsuapi_connect(self) + server_bind = self.drsuapi + server_bind_handle = self.drsuapi_handle + + if not async_op: + # Give the sync replication 5 minutes time + server_bind.request_timeout = 5 * 60 + + samdb_connect(self) + + # we need to find the NTDS GUID of the source DC + msg = self.samdb.search(base=self.samdb.get_config_basedn(), + expression="(&(objectCategory=server)(|(name=%s)(dNSHostName=%s)))" % ( + ldb.binary_encode(SOURCE_DC), + ldb.binary_encode(SOURCE_DC)), + attrs=[]) + if len(msg) == 0: + raise CommandError("Failed to find source DC %s" % SOURCE_DC) + server_dn = msg[0]['dn'] + + msg = self.samdb.search(base=server_dn, scope=ldb.SCOPE_ONELEVEL, + expression="(|(objectCategory=nTDSDSA)(objectCategory=nTDSDSARO))", + attrs=['objectGUID', 'options']) + if len(msg) == 0: + raise CommandError("Failed to find source NTDS DN %s" % SOURCE_DC) + source_dsa_guid = msg[0]['objectGUID'][0] + dsa_options = int(attr_default(msg, 'options', 0)) + + req_options = 0 + if not (dsa_options & dsdb.DS_NTDSDSA_OPT_DISABLE_OUTBOUND_REPL): + req_options |= drsuapi.DRSUAPI_DRS_WRIT_REP + if add_ref: + req_options |= drsuapi.DRSUAPI_DRS_ADD_REF + if sync_forced: + req_options |= drsuapi.DRSUAPI_DRS_SYNC_FORCED + if sync_all: + req_options |= drsuapi.DRSUAPI_DRS_SYNC_ALL + if full_sync: + req_options |= drsuapi.DRSUAPI_DRS_FULL_SYNC_NOW + if async_op: + req_options |= drsuapi.DRSUAPI_DRS_ASYNC_OP + + try: + drs_utils.sendDsReplicaSync(server_bind, server_bind_handle, source_dsa_guid, NC, req_options) + except drs_utils.drsException as estr: + raise CommandError("DsReplicaSync failed", estr) + if async_op: + self.message("Replicate from %s to %s was started." % (SOURCE_DC, DEST_DC)) + else: + self.message("Replicate from %s to %s was successful." 
% (SOURCE_DC, DEST_DC)) + + +class cmd_drs_bind(Command): + """Show DRS capabilities of a server.""" + + synopsis = "%prog [] [options]" + + takes_optiongroups = { + "sambaopts": options.SambaOptions, + "versionopts": options.VersionOptions, + "credopts": options.CredentialsOptions, + } + + takes_args = ["DC?"] + + def run(self, DC=None, sambaopts=None, + credopts=None, versionopts=None): + + self.lp = sambaopts.get_loadparm() + if DC is None: + DC = common.netcmd_dnsname(self.lp) + self.server = DC + self.creds = credopts.get_credentials(self.lp, fallback_machine=True) + + drsuapi_connect(self) + + bind_info = drsuapi.DsBindInfoCtr() + bind_info.length = 28 + bind_info.info = drsuapi.DsBindInfo28() + (info, handle) = self.drsuapi.DsBind(misc.GUID(drsuapi.DRSUAPI_DS_BIND_GUID), bind_info) + + optmap = [ + ("DRSUAPI_SUPPORTED_EXTENSION_BASE", "DRS_EXT_BASE"), + ("DRSUAPI_SUPPORTED_EXTENSION_ASYNC_REPLICATION", "DRS_EXT_ASYNCREPL"), + ("DRSUAPI_SUPPORTED_EXTENSION_REMOVEAPI", "DRS_EXT_REMOVEAPI"), + ("DRSUAPI_SUPPORTED_EXTENSION_MOVEREQ_V2", "DRS_EXT_MOVEREQ_V2"), + ("DRSUAPI_SUPPORTED_EXTENSION_GETCHG_COMPRESS", "DRS_EXT_GETCHG_DEFLATE"), + ("DRSUAPI_SUPPORTED_EXTENSION_DCINFO_V1", "DRS_EXT_DCINFO_V1"), + ("DRSUAPI_SUPPORTED_EXTENSION_RESTORE_USN_OPTIMIZATION", "DRS_EXT_RESTORE_USN_OPTIMIZATION"), + ("DRSUAPI_SUPPORTED_EXTENSION_ADDENTRY", "DRS_EXT_ADDENTRY"), + ("DRSUAPI_SUPPORTED_EXTENSION_KCC_EXECUTE", "DRS_EXT_KCC_EXECUTE"), + ("DRSUAPI_SUPPORTED_EXTENSION_ADDENTRY_V2", "DRS_EXT_ADDENTRY_V2"), + ("DRSUAPI_SUPPORTED_EXTENSION_LINKED_VALUE_REPLICATION", "DRS_EXT_LINKED_VALUE_REPLICATION"), + ("DRSUAPI_SUPPORTED_EXTENSION_DCINFO_V2", "DRS_EXT_DCINFO_V2"), + ("DRSUAPI_SUPPORTED_EXTENSION_INSTANCE_TYPE_NOT_REQ_ON_MOD", "DRS_EXT_INSTANCE_TYPE_NOT_REQ_ON_MOD"), + ("DRSUAPI_SUPPORTED_EXTENSION_CRYPTO_BIND", "DRS_EXT_CRYPTO_BIND"), + ("DRSUAPI_SUPPORTED_EXTENSION_GET_REPL_INFO", "DRS_EXT_GET_REPL_INFO"), + ("DRSUAPI_SUPPORTED_EXTENSION_STRONG_ENCRYPTION", "DRS_EXT_STRONG_ENCRYPTION"), + ("DRSUAPI_SUPPORTED_EXTENSION_DCINFO_V01", "DRS_EXT_DCINFO_VFFFFFFFF"), + ("DRSUAPI_SUPPORTED_EXTENSION_TRANSITIVE_MEMBERSHIP", "DRS_EXT_TRANSITIVE_MEMBERSHIP"), + ("DRSUAPI_SUPPORTED_EXTENSION_ADD_SID_HISTORY", "DRS_EXT_ADD_SID_HISTORY"), + ("DRSUAPI_SUPPORTED_EXTENSION_POST_BETA3", "DRS_EXT_POST_BETA3"), + ("DRSUAPI_SUPPORTED_EXTENSION_GETCHGREQ_V5", "DRS_EXT_GETCHGREQ_V5"), + ("DRSUAPI_SUPPORTED_EXTENSION_GET_MEMBERSHIPS2", "DRS_EXT_GETMEMBERSHIPS2"), + ("DRSUAPI_SUPPORTED_EXTENSION_GETCHGREQ_V6", "DRS_EXT_GETCHGREQ_V6"), + ("DRSUAPI_SUPPORTED_EXTENSION_NONDOMAIN_NCS", "DRS_EXT_NONDOMAIN_NCS"), + ("DRSUAPI_SUPPORTED_EXTENSION_GETCHGREQ_V8", "DRS_EXT_GETCHGREQ_V8"), + ("DRSUAPI_SUPPORTED_EXTENSION_GETCHGREPLY_V5", "DRS_EXT_GETCHGREPLY_V5"), + ("DRSUAPI_SUPPORTED_EXTENSION_GETCHGREPLY_V6", "DRS_EXT_GETCHGREPLY_V6"), + ("DRSUAPI_SUPPORTED_EXTENSION_ADDENTRYREPLY_V3", "DRS_EXT_WHISTLER_BETA3"), + ("DRSUAPI_SUPPORTED_EXTENSION_GETCHGREPLY_V7", "DRS_EXT_WHISTLER_BETA3"), + ("DRSUAPI_SUPPORTED_EXTENSION_VERIFY_OBJECT", "DRS_EXT_WHISTLER_BETA3"), + ("DRSUAPI_SUPPORTED_EXTENSION_XPRESS_COMPRESS", "DRS_EXT_W2K3_DEFLATE"), + ("DRSUAPI_SUPPORTED_EXTENSION_GETCHGREQ_V10", "DRS_EXT_GETCHGREQ_V10"), + ("DRSUAPI_SUPPORTED_EXTENSION_RESERVED_PART2", "DRS_EXT_RESERVED_FOR_WIN2K_OR_DOTNET_PART2"), + ("DRSUAPI_SUPPORTED_EXTENSION_RESERVED_PART3", "DRS_EXT_RESERVED_FOR_WIN2K_OR_DOTNET_PART3") + ] + + optmap_ext = [ + ("DRSUAPI_SUPPORTED_EXTENSION_ADAM", "DRS_EXT_ADAM"), + ("DRSUAPI_SUPPORTED_EXTENSION_LH_BETA2", 
"DRS_EXT_LH_BETA2"), + ("DRSUAPI_SUPPORTED_EXTENSION_RECYCLE_BIN", "DRS_EXT_RECYCLE_BIN")] + + self.message("Bind to %s succeeded." % DC) + self.message("Extensions supported:") + for (opt, str) in optmap: + optval = getattr(drsuapi, opt, 0) + if info.info.supported_extensions & optval: + yesno = "Yes" + else: + yesno = "No " + self.message(" %-60s: %s (%s)" % (opt, yesno, str)) + + if isinstance(info.info, drsuapi.DsBindInfo48): + self.message("\nExtended Extensions supported:") + for (opt, str) in optmap_ext: + optval = getattr(drsuapi, opt, 0) + if info.info.supported_extensions_ext & optval: + yesno = "Yes" + else: + yesno = "No " + self.message(" %-60s: %s (%s)" % (opt, yesno, str)) + + self.message("\nSite GUID: %s" % info.info.site_guid) + self.message("Repl epoch: %u" % info.info.repl_epoch) + if isinstance(info.info, drsuapi.DsBindInfo48): + self.message("Forest GUID: %s" % info.info.config_dn_guid) + + +class cmd_drs_options(Command): + """Query or change 'options' for NTDS Settings object of a Domain Controller.""" + + synopsis = "%prog [] [options]" + + takes_optiongroups = { + "sambaopts": options.SambaOptions, + "versionopts": options.VersionOptions, + "credopts": options.CredentialsOptions, + } + + takes_args = ["DC?"] + + takes_options = [ + Option("--dsa-option", help="DSA option to enable/disable", type="str", + metavar="{+|-}IS_GC | {+|-}DISABLE_INBOUND_REPL | {+|-}DISABLE_OUTBOUND_REPL | {+|-}DISABLE_NTDSCONN_XLATE"), + ] + + option_map = {"IS_GC": 0x00000001, + "DISABLE_INBOUND_REPL": 0x00000002, + "DISABLE_OUTBOUND_REPL": 0x00000004, + "DISABLE_NTDSCONN_XLATE": 0x00000008} + + def run(self, DC=None, dsa_option=None, + sambaopts=None, credopts=None, versionopts=None): + + self.lp = sambaopts.get_loadparm() + if DC is None: + DC = common.netcmd_dnsname(self.lp) + self.server = DC + self.creds = credopts.get_credentials(self.lp, fallback_machine=True) + + samdb_connect(self) + + ntds_dn = self.samdb.get_dsServiceName() + res = self.samdb.search(base=ntds_dn, scope=ldb.SCOPE_BASE, attrs=["options"]) + dsa_opts = int(res[0]["options"][0]) + + # print out current DSA options + cur_opts = [x for x in self.option_map if self.option_map[x] & dsa_opts] + self.message("Current DSA options: " + ", ".join(cur_opts)) + + # modify options + if dsa_option: + if dsa_option[:1] not in ("+", "-"): + raise CommandError("Unknown option %s" % dsa_option) + flag = dsa_option[1:] + if flag not in self.option_map.keys(): + raise CommandError("Unknown option %s" % dsa_option) + if dsa_option[:1] == "+": + dsa_opts |= self.option_map[flag] + else: + dsa_opts &= ~self.option_map[flag] + # save new options + m = ldb.Message() + m.dn = ldb.Dn(self.samdb, ntds_dn) + m["options"] = ldb.MessageElement(str(dsa_opts), ldb.FLAG_MOD_REPLACE, "options") + self.samdb.modify(m) + # print out new DSA options + cur_opts = [x for x in self.option_map if self.option_map[x] & dsa_opts] + self.message("New DSA options: " + ", ".join(cur_opts)) + + +class cmd_drs_clone_dc_database(Command): + """Replicate an initial clone of domain, but DO NOT JOIN it.""" + + synopsis = "%prog [options]" + + takes_optiongroups = { + "sambaopts": options.SambaOptions, + "versionopts": options.VersionOptions, + "credopts": options.CredentialsOptions, + } + + takes_options = [ + Option("--server", help="DC to join", type=str), + Option("--targetdir", help="where to store provision (required)", type=str), + Option("-q", "--quiet", help="Be quiet", action="store_true"), + Option("--include-secrets", help="Also replicate secret values", 
action="store_true"), + Option("--backend-store", type="choice", metavar="BACKENDSTORE", + choices=["tdb", "mdb"], + help="Specify the database backend to be used " + "(default is %s)" % get_default_backend_store()), + Option("--backend-store-size", type="bytes", metavar="SIZE", + help="Specify the size of the backend database, currently" + + "only supported by lmdb backends (default is 8 Gb).") + ] + + takes_args = ["domain"] + + def run(self, domain, sambaopts=None, credopts=None, + versionopts=None, server=None, targetdir=None, + quiet=False, verbose=False, include_secrets=False, + backend_store=None, backend_store_size=None): + lp = sambaopts.get_loadparm() + creds = credopts.get_credentials(lp) + + logger = self.get_logger(verbose=verbose, quiet=quiet) + + if targetdir is None: + raise CommandError("--targetdir option must be specified") + + join_clone(logger=logger, server=server, creds=creds, lp=lp, + domain=domain, dns_backend='SAMBA_INTERNAL', + targetdir=targetdir, include_secrets=include_secrets, + backend_store=backend_store, + backend_store_size=backend_store_size) + + +class cmd_drs_uptodateness(Command): + """Show uptodateness status""" + + synopsis = "%prog [options]" + + takes_optiongroups = { + "sambaopts": options.SambaOptions, + "versionopts": options.VersionOptions, + "credopts": options.CredentialsOptions, + } + + takes_options = [ + Option("-H", "--URL", metavar="URL", dest="H", + help="LDB URL for database or target server"), + Option("-p", "--partition", + help="restrict to this partition"), + Option("--json", action='store_true', + help="Print data in json format"), + Option("--maximum", action='store_true', + help="Print maximum out-of-date-ness only"), + Option("--median", action='store_true', + help="Print median out-of-date-ness only"), + Option("--full", action='store_true', + help="Print full out-of-date-ness data"), + ] + + def format_as_json(self, partitions_summaries): + return json.dumps(partitions_summaries, indent=2) + + def format_as_text(self, partitions_summaries): + lines = [] + for part_name, summary in partitions_summaries.items(): + items = ['%s: %s' % (k, v) for k, v in summary.items()] + line = '%-15s %s' % (part_name, ' '.join(items)) + lines.append(line) + return '\n'.join(lines) + + def run(self, H=None, partition=None, + json=False, maximum=False, median=False, full=False, + sambaopts=None, credopts=None, versionopts=None, + quiet=False, verbose=False): + + lp = sambaopts.get_loadparm() + creds = credopts.get_credentials(lp, fallback_machine=True) + local_kcc, dsas = get_kcc_and_dsas(H, lp, creds) + samdb = local_kcc.samdb + short_partitions, _ = get_partition_maps(samdb) + if partition: + if partition in short_partitions: + part_dn = short_partitions[partition] + # narrow down to specified partition only + short_partitions = {partition: part_dn} + else: + raise CommandError("unknown partition %s" % partition) + + filters = [] + if maximum: + filters.append('maximum') + if median: + filters.append('median') + + partitions_distances = {} + partitions_summaries = {} + for part_name, part_dn in short_partitions.items(): + utdv_edges = get_utdv_edges(local_kcc, dsas, part_dn, lp, creds) + distances = get_utdv_distances(utdv_edges, dsas) + summary = get_utdv_summary(distances, filters=filters) + partitions_distances[part_name] = distances + partitions_summaries[part_name] = summary + + if full: + # always print json format + output = self.format_as_json(partitions_distances) + else: + if json: + output = 
self.format_as_json(partitions_summaries) + else: + output = self.format_as_text(partitions_summaries) + + print(output, file=self.outf) + + +class cmd_drs(SuperCommand): + """Directory Replication Services (DRS) management.""" + + subcommands = {} + subcommands["bind"] = cmd_drs_bind() + subcommands["kcc"] = cmd_drs_kcc() + subcommands["replicate"] = cmd_drs_replicate() + subcommands["showrepl"] = cmd_drs_showrepl() + subcommands["options"] = cmd_drs_options() + subcommands["clone-dc-database"] = cmd_drs_clone_dc_database() + subcommands["uptodateness"] = cmd_drs_uptodateness() diff --git a/python/samba/netcmd/dsacl.py b/python/samba/netcmd/dsacl.py new file mode 100644 index 0000000..527c534 --- /dev/null +++ b/python/samba/netcmd/dsacl.py @@ -0,0 +1,217 @@ +# Manipulate ACLs on directory objects +# +# Copyright (C) Nadezhda Ivanova 2010 +# +# This program is free software; you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation; either version 3 of the License, or +# (at your option) any later version. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with this program. If not, see <http://www.gnu.org/licenses/>. +# + +import samba.getopt as options +from samba import sd_utils +from samba.dcerpc import security +from samba.samdb import SamDB +from samba.ndr import ndr_unpack, ndr_pack +from samba.dcerpc.security import ( + GUID_DRS_ALLOCATE_RIDS, GUID_DRS_CHANGE_DOMAIN_MASTER, + GUID_DRS_CHANGE_INFR_MASTER, GUID_DRS_CHANGE_PDC, + GUID_DRS_CHANGE_RID_MASTER, GUID_DRS_CHANGE_SCHEMA_MASTER, + GUID_DRS_GET_CHANGES, GUID_DRS_GET_ALL_CHANGES, + GUID_DRS_GET_FILTERED_ATTRIBUTES, GUID_DRS_MANAGE_TOPOLOGY, + GUID_DRS_MONITOR_TOPOLOGY, GUID_DRS_REPL_SYNCRONIZE, + GUID_DRS_RO_REPL_SECRET_SYNC) + + +import ldb +from ldb import SCOPE_BASE +import re + +from samba.auth import system_session +from samba.netcmd import ( + Command, + CommandError, + SuperCommand, + Option, +) + +class cmd_dsacl_base(Command): + """Base class for DSACL commands.""" + + synopsis = "%prog [options]" + + takes_optiongroups = { + "sambaopts": options.SambaOptions, + "credopts": options.CredentialsOptions, + "versionopts": options.VersionOptions, + } + + def print_acl(self, sd_helper, object_dn, prefix=''): + desc_sddl = sd_helper.get_sd_as_sddl(object_dn) + self.outf.write("%sdescriptor for %s:\n" % (prefix, object_dn)) + self.outf.write(desc_sddl + "\n") + + +class cmd_dsacl_set(cmd_dsacl_base): + """Modify access list on a directory object.""" + + car_help = """ The access control right to allow or deny """ + + takes_options = [ + Option("-H", "--URL", help="LDB URL for database or target server", + type=str, metavar="URL", dest="H"), + Option("--car", type="choice", choices=["change-rid", + "change-pdc", + "change-infrastructure", + "change-schema", + "change-naming", + "allocate_rids", + "get-changes", + "get-changes-all", + "get-changes-filtered", + "topology-manage", + "topology-monitor", + "repl-sync", + "ro-repl-secret-sync"], + help=car_help), + Option("--action", type="choice", choices=["allow", "deny"], + help="""Deny or allow access"""), + Option("--objectdn", help="DN of the object whose SD to modify", + type="string"), + Option("--trusteedn", help="DN of the entity that gets
access", + type="string"), + Option("--sddl", help="An ACE or group of ACEs to be added on the object", + type="string"), + ] + + def find_trustee_sid(self, samdb, trusteedn): + res = samdb.search(base=trusteedn, expression="(objectClass=*)", + scope=SCOPE_BASE) + assert(len(res) == 1) + return ndr_unpack(security.dom_sid, res[0]["objectSid"][0]) + + def add_ace(self, sd_helper, object_dn, new_ace): + """Add new ace explicitly.""" + ai,ii = sd_helper.dacl_prepend_aces(object_dn, new_ace) + for ace in ii: + sddl = ace.as_sddl(sd_helper.domain_sid) + self.outf.write("WARNING: ignored INHERITED_ACE (%s).\n" % sddl) + for ace in ai: + sddl = ace.as_sddl(sd_helper.domain_sid) + self.outf.write("WARNING: (%s) was already found in the current security descriptor.\n" % sddl) + + def run(self, car, action, objectdn, trusteedn, sddl, + H=None, credopts=None, sambaopts=None, versionopts=None): + lp = sambaopts.get_loadparm() + creds = credopts.get_credentials(lp) + + if sddl is None and (car is None or action is None + or objectdn is None or trusteedn is None): + return self.usage() + + samdb = SamDB(url=H, session_info=system_session(), + credentials=creds, lp=lp) + sd_helper = sd_utils.SDUtils(samdb) + cars = {'change-rid': GUID_DRS_CHANGE_RID_MASTER, + 'change-pdc': GUID_DRS_CHANGE_PDC, + 'change-infrastructure': GUID_DRS_CHANGE_INFR_MASTER, + 'change-schema': GUID_DRS_CHANGE_SCHEMA_MASTER, + 'change-naming': GUID_DRS_CHANGE_DOMAIN_MASTER, + 'allocate_rids': GUID_DRS_ALLOCATE_RIDS, + 'get-changes': GUID_DRS_GET_CHANGES, + 'get-changes-all': GUID_DRS_GET_ALL_CHANGES, + 'get-changes-filtered': GUID_DRS_GET_FILTERED_ATTRIBUTES, + 'topology-manage': GUID_DRS_MANAGE_TOPOLOGY, + 'topology-monitor': GUID_DRS_MONITOR_TOPOLOGY, + 'repl-sync': GUID_DRS_REPL_SYNCRONIZE, + 'ro-repl-secret-sync': GUID_DRS_RO_REPL_SECRET_SYNC, + } + sid = self.find_trustee_sid(samdb, trusteedn) + if sddl: + new_ace = sddl + elif action == "allow": + new_ace = "(OA;;CR;%s;;%s)" % (cars[car], str(sid)) + elif action == "deny": + new_ace = "(OD;;CR;%s;;%s)" % (cars[car], str(sid)) + else: + raise CommandError("Wrong argument '%s'!" 
% action) + + self.print_acl(sd_helper, objectdn, prefix='old ') + self.add_ace(sd_helper, objectdn, new_ace) + self.print_acl(sd_helper, objectdn, prefix='new ') + + +class cmd_dsacl_get(cmd_dsacl_base): + """Print access list on a directory object.""" + + takes_options = [ + Option("-H", "--URL", help="LDB URL for database or target server", + type=str, metavar="URL", dest="H"), + Option("--objectdn", help="DN of the object whose SD to modify", + type="string"), + ] + + def run(self, objectdn, + H=None, credopts=None, sambaopts=None, versionopts=None): + lp = sambaopts.get_loadparm() + creds = credopts.get_credentials(lp) + + samdb = SamDB(url=H, session_info=system_session(), + credentials=creds, lp=lp) + sd_helper = sd_utils.SDUtils(samdb) + self.print_acl(sd_helper, objectdn) + + +class cmd_dsacl_delete(cmd_dsacl_base): + """Delete an access list entry on a directory object.""" + + takes_options = [ + Option("-H", "--URL", help="LDB URL for database or target server", + type=str, metavar="URL", dest="H"), + Option("--objectdn", help="DN of the object whose SD to modify", + type="string"), + Option("--sddl", help="An ACE or group of ACEs to be deleted from the object", + type="string"), + ] + + def run(self, objectdn, sddl, H=None, credopts=None, sambaopts=None, versionopts=None): + lp = sambaopts.get_loadparm() + creds = credopts.get_credentials(lp) + + if sddl is None or objectdn is None: + return self.usage() + + samdb = SamDB(url=H, session_info=system_session(), + credentials=creds, lp=lp) + sd_helper = sd_utils.SDUtils(samdb) + + self.print_acl(sd_helper, objectdn, prefix='old ') + self.delete_ace(sd_helper, objectdn, sddl) + self.print_acl(sd_helper, objectdn, prefix='new ') + + def delete_ace(self, sd_helper, object_dn, delete_aces): + """Delete ace explicitly.""" + di,ii = sd_helper.dacl_delete_aces(object_dn, delete_aces) + for ace in ii: + sddl = ace.as_sddl(sd_helper.domain_sid) + self.outf.write("WARNING: ignored INHERITED_ACE (%s).\n" % sddl) + for ace in di: + sddl = ace.as_sddl(sd_helper.domain_sid) + self.outf.write("WARNING: (%s) was not found in the current security descriptor.\n" % sddl) + + +class cmd_dsacl(SuperCommand): + """DS ACLs manipulation.""" + + subcommands = {} + subcommands["set"] = cmd_dsacl_set() + subcommands["get"] = cmd_dsacl_get() + subcommands["delete"] = cmd_dsacl_delete() diff --git a/python/samba/netcmd/encoders.py b/python/samba/netcmd/encoders.py new file mode 100644 index 0000000..7d32b68 --- /dev/null +++ b/python/samba/netcmd/encoders.py @@ -0,0 +1,49 @@ +# Unix SMB/CIFS implementation. +# +# encoders: JSONEncoder class for dealing with object fields. +# +# Copyright (C) Catalyst.Net Ltd. 2023 +# +# Written by Rob van der Linde +# +# This program is free software; you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation; either version 3 of the License, or +# (at your option) any later version. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with this program. If not, see <http://www.gnu.org/licenses/>.
+# + +import json +from datetime import datetime +from decimal import Decimal +from enum import Enum + +from ldb import Dn + + +class JSONEncoder(json.JSONEncoder): + """Custom JSON encoder class to help out with some data types. + + For example, the json module has no idea how to encode a Dn object to str. + Another common object that is handled is Decimal types. + + In addition, any objects that have a __json__ method will get called. + """ + + def default(self, obj): + if isinstance(obj, (Decimal, Dn)): + return str(obj) + elif isinstance(obj, Enum): + return str(obj.value) + elif isinstance(obj, datetime): + return obj.isoformat() + elif getattr(obj, "__json__", None) and callable(obj.__json__): + return obj.__json__() + return super().default(obj) diff --git a/python/samba/netcmd/forest.py b/python/samba/netcmd/forest.py new file mode 100644 index 0000000..4a5293c --- /dev/null +++ b/python/samba/netcmd/forest.py @@ -0,0 +1,167 @@ +# domain management +# +# Copyright William Brown 2018 +# +# This program is free software; you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation; either version 3 of the License, or +# (at your option) any later version. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with this program. If not, see <http://www.gnu.org/licenses/>. +# + +import ldb +import samba.getopt as options +from samba.auth import system_session +from samba.samdb import SamDB +from samba.netcmd import ( + Command, + CommandError, + SuperCommand, + Option +) + + +class cmd_forest_show(Command): + """Display forest settings. + + These settings control the behaviour of all domain controllers in this + forest. This displays those settings from the replicated configuration + partition. + """ + + synopsis = "%prog [options]" + + takes_optiongroups = { + "sambaopts": options.SambaOptions, + "versionopts": options.VersionOptions, + "credopts": options.CredentialsOptions, + } + + takes_options = [ + Option("-H", "--URL", help="LDB URL for database or target server", + type=str, metavar="URL", dest="H"), + ] + + def run(self, H=None, credopts=None, sambaopts=None, versionopts=None): + lp = sambaopts.get_loadparm() + creds = credopts.get_credentials(lp) + + samdb = SamDB(url=H, session_info=system_session(), + credentials=creds, lp=lp) + + domain_dn = samdb.domain_dn() + object_dn = "%s,%s" % (self.objectdn, domain_dn) + + # Show all the settings we know how to set in the forest object! + res = samdb.search(base=object_dn, scope=ldb.SCOPE_BASE, + attrs=self.attributes) + + # Now we just display these attributes. The value is that + # we make them a bit prettier and human accessible. + # There should only be one response! + res_object = res[0] + + self.outf.write("Settings for %s\n" % object_dn) + for attr in self.attributes: + try: + self.outf.write("%s: %s\n" % (attr, res_object[attr][0])) + except KeyError: + self.outf.write("%s: <NO VALUE>\n" % attr) + + +class cmd_forest_set(Command): + """Modify forest settings. + + This will alter the setting specified to value.
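+ + Subclasses bind this command to a concrete setting by overriding two + class attributes; a minimal sketch (the subclass name here is + illustrative only, not part of this tree): + + class cmd_forest_set_example(cmd_forest_set): + objectdn = "CN=Directory Service,CN=Windows NT,CN=Services,CN=Configuration" + attribute = 'dsheuristics'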
+ """ + + attribute = None + objectdn = None + + synopsis = "%prog value [options]" + + takes_optiongroups = { + "sambaopts": options.SambaOptions, + "versionopts": options.VersionOptions, + "credopts": options.CredentialsOptions, + } + + takes_options = [ + Option("-H", "--URL", help="LDB URL for database or target server", + type=str, metavar="URL", dest="H"), + ] + + takes_args = ["value"] + + def run(self, value, H=None, credopts=None, sambaopts=None, versionopts=None): + lp = sambaopts.get_loadparm() + creds = credopts.get_credentials(lp) + + samdb = SamDB(url=H, session_info=system_session(), + credentials=creds, lp=lp) + + domain_dn = samdb.domain_dn() + object_dn = "%s,%s" % (self.objectdn, domain_dn) + + # Create the modification + m = ldb.Message() + m.dn = ldb.Dn(samdb, object_dn) + m[self.attribute] = ldb.MessageElement( + value, ldb.FLAG_MOD_REPLACE, self.attribute) + + samdb.modify(m) + self.outf.write("set %s: %s\n" % (self.attribute, value)) + + +# Then you override it for each setting name: + +class cmd_forest_show_directory_service(cmd_forest_show): + """Display Directory Service settings for the forest. + + These settings control how the Directory Service behaves on all domain + controllers in the forest. + """ + objectdn = "CN=Directory Service,CN=Windows NT,CN=Services,CN=Configuration" + attributes = ['dsheuristics'] + + +class cmd_forest_set_directory_service_dsheuristics(cmd_forest_set): + """Set the value of dsheuristics on the Directory Service. + + This value alters the behaviour of the Directory Service on all domain + controllers in the forest. Documentation related to this parameter can be + found here: https://msdn.microsoft.com/en-us/library/cc223560.aspx + + In summary each "character" of the number-string, controls a setting. + A common setting is to set the value "2" in the 7th character. This controls + anonymous search behaviour. + + Example: dsheuristics 0000002 + + This would allow anonymous LDAP searches to the domain (you may still need + to alter access controls to allow this). + """ + objectdn = "CN=Directory Service,CN=Windows NT,CN=Services,CN=Configuration" + attribute = 'dsheuristics' + + +class cmd_forest_directory_service(SuperCommand): + """Forest configuration partition management.""" + + subcommands = {} + subcommands["show"] = cmd_forest_show_directory_service() + subcommands["dsheuristics"] = cmd_forest_set_directory_service_dsheuristics() + + +class cmd_forest(SuperCommand): + """Forest management.""" + + subcommands = {} + subcommands["directory_service"] = cmd_forest_directory_service() diff --git a/python/samba/netcmd/fsmo.py b/python/samba/netcmd/fsmo.py new file mode 100644 index 0000000..643d0ae --- /dev/null +++ b/python/samba/netcmd/fsmo.py @@ -0,0 +1,535 @@ +# Changes a FSMO role owner +# +# Copyright Nadezhda Ivanova 2009 +# Copyright Jelmer Vernooij 2009 +# +# This program is free software; you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation; either version 3 of the License, or +# (at your option) any later version. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with this program. If not, see . 
+# + +import samba +import samba.getopt as options +import ldb +from ldb import LdbError +from samba.dcerpc import drsuapi, misc +from samba.auth import system_session +import samba.drs_utils +from samba.netcmd import ( + Command, + CommandError, + SuperCommand, + Option, +) +from samba.samdb import SamDB + + +def get_fsmo_roleowner(samdb, roledn, role): + """Gets the owner of an FSMO role + + :param roledn: The DN of the FSMO role + :param role: The FSMO role + """ + try: + res = samdb.search(roledn, + scope=ldb.SCOPE_BASE, attrs=["fSMORoleOwner"]) + except LdbError as e7: + (num, msg) = e7.args + if num == ldb.ERR_NO_SUCH_OBJECT: + raise CommandError("The '%s' role is not present in this domain" % role) + raise + + if 'fSMORoleOwner' in res[0]: + master_owner = (ldb.Dn(samdb, res[0]["fSMORoleOwner"][0].decode('utf8'))) + else: + master_owner = None + + return master_owner + + +def transfer_dns_role(outf, sambaopts, credopts, role, samdb): + """Transfer dns FSMO role. """ + + if role == "domaindns": + domain_dn = samdb.domain_dn() + role_object = "CN=Infrastructure,DC=DomainDnsZones," + domain_dn + elif role == "forestdns": + forest_dn = samba.dn_from_dns_name(samdb.forest_dns_name()) + role_object = "CN=Infrastructure,DC=ForestDnsZones," + forest_dn + + new_host_dns_name = samdb.host_dns_name() + + res = samdb.search(role_object, + attrs=["fSMORoleOwner"], + scope=ldb.SCOPE_BASE, + controls=["extended_dn:1:1"]) + + if 'fSMORoleOwner' in res[0]: + try: + master_guid = str(misc.GUID(ldb.Dn(samdb, + res[0]['fSMORoleOwner'][0].decode('utf8')) + .get_extended_component('GUID'))) + master_owner = str(ldb.Dn(samdb, res[0]['fSMORoleOwner'][0].decode('utf8'))) + except LdbError as e3: + (num, msg) = e3.args + raise CommandError("No GUID found in naming master DN %s : %s \n" % + (res[0]['fSMORoleOwner'][0], msg)) + else: + outf.write("* The '%s' role does not have an FSMO roleowner\n" % role) + return False + + if role == "domaindns": + master_dns_name = '%s._msdcs.%s' % (master_guid, + samdb.domain_dns_name()) + new_dns_name = '%s._msdcs.%s' % (samdb.get_ntds_GUID(), + samdb.domain_dns_name()) + elif role == "forestdns": + master_dns_name = '%s._msdcs.%s' % (master_guid, + samdb.forest_dns_name()) + new_dns_name = '%s._msdcs.%s' % (samdb.get_ntds_GUID(), + samdb.forest_dns_name()) + + new_owner = samdb.get_dsServiceName() + + if master_dns_name != new_dns_name: + lp = sambaopts.get_loadparm() + creds = credopts.get_credentials(lp, fallback_machine=True) + samdb = SamDB(url="ldap://%s" % (master_dns_name), + session_info=system_session(), + credentials=creds, lp=lp) + + m = ldb.Message() + m.dn = ldb.Dn(samdb, role_object) + m["fSMORoleOwner_Del"] = ldb.MessageElement(master_owner, + ldb.FLAG_MOD_DELETE, + "fSMORoleOwner") + m["fSMORoleOwner_Add"] = ldb.MessageElement(new_owner, + ldb.FLAG_MOD_ADD, + "fSMORoleOwner") + try: + samdb.modify(m) + except LdbError as e5: + (num, msg) = e5.args + raise CommandError("Failed to add role '%s': %s" % (role, msg)) + + try: + connection = samba.drs_utils.drsuapi_connect(new_host_dns_name, + lp, creds) + except samba.drs_utils.drsException as e: + raise CommandError("Drsuapi Connect failed", e) + + try: + drsuapi_connection = connection[0] + drsuapi_handle = connection[1] + req_options = drsuapi.DRSUAPI_DRS_WRIT_REP + NC = role_object[18:] + samba.drs_utils.sendDsReplicaSync(drsuapi_connection, + drsuapi_handle, + master_guid, + NC, req_options) + except samba.drs_utils.drsException as estr: + raise CommandError("Replication failed", estr) + + outf.write("FSMO 
transfer of '%s' role successful\n" % role) + return True + else: + outf.write("This DC already has the '%s' FSMO role\n" % role) + return False + + +def transfer_role(outf, role, samdb): + """Transfer standard FSMO role. """ + + domain_dn = samdb.domain_dn() + rid_dn = "CN=RID Manager$,CN=System," + domain_dn + naming_dn = "CN=Partitions,%s" % samdb.get_config_basedn() + infrastructure_dn = "CN=Infrastructure," + domain_dn + schema_dn = str(samdb.get_schema_basedn()) + new_owner = ldb.Dn(samdb, samdb.get_dsServiceName()) + m = ldb.Message() + m.dn = ldb.Dn(samdb, "") + if role == "rid": + master_owner = get_fsmo_roleowner(samdb, rid_dn, role) + m["becomeRidMaster"] = ldb.MessageElement( + "1", ldb.FLAG_MOD_REPLACE, + "becomeRidMaster") + elif role == "pdc": + master_owner = get_fsmo_roleowner(samdb, domain_dn, role) + + res = samdb.search(domain_dn, + scope=ldb.SCOPE_BASE, attrs=["objectSid"]) + assert len(res) == 1 + sid = res[0]["objectSid"][0] + m["becomePdc"] = ldb.MessageElement( + sid, ldb.FLAG_MOD_REPLACE, + "becomePdc") + elif role == "naming": + master_owner = get_fsmo_roleowner(samdb, naming_dn, role) + m["becomeDomainMaster"] = ldb.MessageElement( + "1", ldb.FLAG_MOD_REPLACE, + "becomeDomainMaster") + elif role == "infrastructure": + master_owner = get_fsmo_roleowner(samdb, infrastructure_dn, role) + m["becomeInfrastructureMaster"] = ldb.MessageElement( + "1", ldb.FLAG_MOD_REPLACE, + "becomeInfrastructureMaster") + elif role == "schema": + master_owner = get_fsmo_roleowner(samdb, schema_dn, role) + m["becomeSchemaMaster"] = ldb.MessageElement( + "1", ldb.FLAG_MOD_REPLACE, + "becomeSchemaMaster") + else: + raise CommandError("Invalid FSMO role.") + + if master_owner is None: + outf.write("Cannot transfer, no DC assigned to the %s role. Try 'seize' instead\n" % role) + return False + + if master_owner != new_owner: + try: + samdb.modify(m) + except LdbError as e6: + (num, msg) = e6.args + raise CommandError("Transfer of '%s' role failed: %s" % + (role, msg)) + + outf.write("FSMO transfer of '%s' role successful\n" % role) + return True + else: + outf.write("This DC already has the '%s' FSMO role\n" % role) + return False + + +class cmd_fsmo_seize(Command): + """Seize the role.""" + + synopsis = "%prog [options]" + + takes_optiongroups = { + "sambaopts": options.SambaOptions, + "credopts": options.CredentialsOptions, + "versionopts": options.VersionOptions, + } + + takes_options = [ + Option("-H", "--URL", help="LDB URL for database or target server", + type=str, metavar="URL", dest="H"), + Option("--force", + help="Force seizing of role without attempting to transfer.", + action="store_true"), + Option("--role", type="choice", choices=["rid", "pdc", "infrastructure", + "schema", "naming", "domaindns", "forestdns", "all"], + help="""The FSMO role to seize or transfer.\n +rid=RidAllocationMasterRole\n +schema=SchemaMasterRole\n +pdc=PdcEmulationMasterRole\n +naming=DomainNamingMasterRole\n +infrastructure=InfrastructureMasterRole\n +domaindns=DomainDnsZonesMasterRole\n +forestdns=ForestDnsZonesMasterRole\n +all=all of the above\n +You must provide an Admin user and password."""), + ] + + takes_args = [] + + def seize_role(self, role, samdb, force): + """Seize standard fsmo role. 
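+ + A transfer is attempted first so that a still-active owner can hand the + role over cleanly; only when that transfer fails, or --force was given, + is fSMORoleOwner overwritten on the role object directly.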
""" + + serviceName = samdb.get_dsServiceName() + domain_dn = samdb.domain_dn() + self.infrastructure_dn = "CN=Infrastructure," + domain_dn + self.naming_dn = "CN=Partitions,%s" % samdb.get_config_basedn() + self.schema_dn = str(samdb.get_schema_basedn()) + self.rid_dn = "CN=RID Manager$,CN=System," + domain_dn + + m = ldb.Message() + if role == "rid": + m.dn = ldb.Dn(samdb, self.rid_dn) + elif role == "pdc": + m.dn = ldb.Dn(samdb, domain_dn) + elif role == "naming": + m.dn = ldb.Dn(samdb, self.naming_dn) + elif role == "infrastructure": + m.dn = ldb.Dn(samdb, self.infrastructure_dn) + elif role == "schema": + m.dn = ldb.Dn(samdb, self.schema_dn) + else: + raise CommandError("Invalid FSMO role.") + # first try to transfer to avoid problem if the owner is still active + seize = False + master_owner = get_fsmo_roleowner(samdb, m.dn, role) + # if there is a different owner + if master_owner is not None: + # if there is a different owner + if master_owner != serviceName: + # if --force isn't given, attempt transfer + if force is None: + self.message("Attempting transfer...") + try: + transfer_role(self.outf, role, samdb) + except: + # transfer failed, use the big axe... + seize = True + self.message("Transfer unsuccessful, seizing...") + else: + self.message("Transfer successful, not seizing role") + return True + else: + self.outf.write("This DC already has the '%s' FSMO role\n" % + role) + return False + else: + seize = True + + if force is not None or seize: + self.message("Seizing %s FSMO role..." % role) + m["fSMORoleOwner"] = ldb.MessageElement( + serviceName, ldb.FLAG_MOD_REPLACE, + "fSMORoleOwner") + + samdb.transaction_start() + try: + samdb.modify(m) + if role == "rid": + # We may need to allocate the initial RID Set + samdb.create_own_rid_set() + + except LdbError as e1: + (num, msg) = e1.args + if role == "rid" and num == ldb.ERR_ENTRY_ALREADY_EXISTS: + + # Try again without the RID Set allocation + # (normal). We have to manage the transaction as + # we do not have nested transactions and creating + # a RID set touches multiple objects. :-( + samdb.transaction_cancel() + samdb.transaction_start() + try: + samdb.modify(m) + except LdbError as e: + (num, msg) = e.args + samdb.transaction_cancel() + raise CommandError("Failed to seize '%s' role: %s" % + (role, msg)) + + else: + samdb.transaction_cancel() + raise CommandError("Failed to seize '%s' role: %s" % + (role, msg)) + samdb.transaction_commit() + self.outf.write("FSMO seize of '%s' role successful\n" % role) + + return True + + def seize_dns_role(self, role, samdb, credopts, sambaopts, + versionopts, force): + """Seize DNS FSMO role. 
""" + + serviceName = samdb.get_dsServiceName() + domain_dn = samdb.domain_dn() + forest_dn = samba.dn_from_dns_name(samdb.forest_dns_name()) + self.domaindns_dn = "CN=Infrastructure,DC=DomainDnsZones," + domain_dn + self.forestdns_dn = "CN=Infrastructure,DC=ForestDnsZones," + forest_dn + + m = ldb.Message() + if role == "domaindns": + m.dn = ldb.Dn(samdb, self.domaindns_dn) + elif role == "forestdns": + m.dn = ldb.Dn(samdb, self.forestdns_dn) + else: + raise CommandError("Invalid FSMO role.") + # first try to transfer to avoid problem if the owner is still active + seize = False + master_owner = get_fsmo_roleowner(samdb, m.dn, role) + if master_owner is not None: + # if there is a different owner + if master_owner != serviceName: + # if --force isn't given, attempt transfer + if force is None: + self.message("Attempting transfer...") + try: + transfer_dns_role(self.outf, sambaopts, credopts, role, + samdb) + except: + # transfer failed, use the big axe... + seize = True + self.message("Transfer unsuccessful, seizing...") + else: + self.message("Transfer successful, not seizing role\n") + return True + else: + self.outf.write("This DC already has the '%s' FSMO role\n" % + role) + return False + else: + seize = True + + if force is not None or seize: + self.message("Seizing %s FSMO role..." % role) + m["fSMORoleOwner"] = ldb.MessageElement( + serviceName, ldb.FLAG_MOD_REPLACE, + "fSMORoleOwner") + try: + samdb.modify(m) + except LdbError as e2: + (num, msg) = e2.args + raise CommandError("Failed to seize '%s' role: %s" % + (role, msg)) + self.outf.write("FSMO seize of '%s' role successful\n" % role) + return True + + def run(self, force=None, H=None, role=None, + credopts=None, sambaopts=None, versionopts=None): + + lp = sambaopts.get_loadparm() + creds = credopts.get_credentials(lp, fallback_machine=True) + + samdb = SamDB(url=H, session_info=system_session(), + credentials=creds, lp=lp) + + if role == "all": + self.seize_role("rid", samdb, force) + self.seize_role("pdc", samdb, force) + self.seize_role("naming", samdb, force) + self.seize_role("infrastructure", samdb, force) + self.seize_role("schema", samdb, force) + self.seize_dns_role("domaindns", samdb, credopts, sambaopts, + versionopts, force) + self.seize_dns_role("forestdns", samdb, credopts, sambaopts, + versionopts, force) + else: + if role == "domaindns" or role == "forestdns": + self.seize_dns_role(role, samdb, credopts, sambaopts, + versionopts, force) + else: + self.seize_role(role, samdb, force) + + +class cmd_fsmo_show(Command): + """Show the roles.""" + + synopsis = "%prog [options]" + + takes_optiongroups = { + "sambaopts": options.SambaOptions, + "credopts": options.CredentialsOptions, + "versionopts": options.VersionOptions, + } + + takes_options = [ + Option("-H", "--URL", help="LDB URL for database or target server", + type=str, metavar="URL", dest="H"), + ] + + takes_args = [] + + def run(self, H=None, credopts=None, sambaopts=None, versionopts=None): + lp = sambaopts.get_loadparm() + creds = credopts.get_credentials(lp, fallback_machine=True) + + samdb = SamDB(url=H, session_info=system_session(), + credentials=creds, lp=lp) + + domain_dn = samdb.domain_dn() + forest_dn = samba.dn_from_dns_name(samdb.forest_dns_name()) + infrastructure_dn = "CN=Infrastructure," + domain_dn + naming_dn = "CN=Partitions,%s" % samdb.get_config_basedn() + schema_dn = samdb.get_schema_basedn() + rid_dn = "CN=RID Manager$,CN=System," + domain_dn + domaindns_dn = "CN=Infrastructure,DC=DomainDnsZones," + domain_dn + forestdns_dn = 
"CN=Infrastructure,DC=ForestDnsZones," + forest_dn + + masters = [(schema_dn, "schema", "SchemaMasterRole"), + (infrastructure_dn, "infrastructure", "InfrastructureMasterRole"), + (rid_dn, "rid", "RidAllocationMasterRole"), + (domain_dn, "pdc", "PdcEmulationMasterRole"), + (naming_dn, "naming", "DomainNamingMasterRole"), + (domaindns_dn, "domaindns", "DomainDnsZonesMasterRole"), + (forestdns_dn, "forestdns", "ForestDnsZonesMasterRole"), + ] + + for master in masters: + (dn, short_name, long_name) = master + try: + master = get_fsmo_roleowner(samdb, dn, short_name) + if master is not None: + self.message("%s owner: %s" % (long_name, str(master))) + else: + self.message("%s has no current owner" % (long_name)) + except CommandError as e: + self.message("%s: * %s" % (long_name, e.message)) + + +class cmd_fsmo_transfer(Command): + """Transfer the role.""" + + synopsis = "%prog [options]" + + takes_optiongroups = { + "sambaopts": options.SambaOptions, + "credopts": options.CredentialsOptions, + "versionopts": options.VersionOptions, + } + + takes_options = [ + Option("-H", "--URL", help="LDB URL for database or target server", + type=str, metavar="URL", dest="H"), + Option("--role", type="choice", choices=["rid", "pdc", "infrastructure", + "schema", "naming", "domaindns", "forestdns", "all"], + help="""The FSMO role to seize or transfer.\n +rid=RidAllocationMasterRole\n +schema=SchemaMasterRole\n +pdc=PdcEmulationMasterRole\n +naming=DomainNamingMasterRole\n +infrastructure=InfrastructureMasterRole\n +domaindns=DomainDnsZonesMasterRole\n +forestdns=ForestDnsZonesMasterRole\n +all=all of the above\n +You must provide an Admin user and password."""), + ] + + takes_args = [] + + def run(self, force=None, H=None, role=None, + credopts=None, sambaopts=None, versionopts=None): + + lp = sambaopts.get_loadparm() + creds = credopts.get_credentials(lp, fallback_machine=True) + + samdb = SamDB(url=H, session_info=system_session(), + credentials=creds, lp=lp) + + if role == "all": + transfer_role(self.outf, "rid", samdb) + transfer_role(self.outf, "pdc", samdb) + transfer_role(self.outf, "naming", samdb) + transfer_role(self.outf, "infrastructure", samdb) + transfer_role(self.outf, "schema", samdb) + transfer_dns_role(self.outf, sambaopts, credopts, + "domaindns", samdb) + transfer_dns_role(self.outf, sambaopts, credopts, "forestdns", + samdb) + else: + if role == "domaindns" or role == "forestdns": + transfer_dns_role(self.outf, sambaopts, credopts, role, samdb) + else: + transfer_role(self.outf, role, samdb) + + +class cmd_fsmo(SuperCommand): + """Flexible Single Master Operations (FSMO) roles management.""" + + subcommands = {} + subcommands["seize"] = cmd_fsmo_seize() + subcommands["show"] = cmd_fsmo_show() + subcommands["transfer"] = cmd_fsmo_transfer() diff --git a/python/samba/netcmd/gpcommon.py b/python/samba/netcmd/gpcommon.py new file mode 100644 index 0000000..b8ac09e --- /dev/null +++ b/python/samba/netcmd/gpcommon.py @@ -0,0 +1,55 @@ +# Samba common group policy functions +# +# Copyright Andrew Tridgell 2010 +# Copyright Amitay Isaacs 2011-2012 +# +# This program is free software; you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation; either version 3 of the License, or +# (at your option) any later version. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with this program. If not, see <http://www.gnu.org/licenses/>. +# +import ldb +from samba.credentials import SMB_SIGNING_REQUIRED +from samba.samba3 import param as s3param +from samba.samba3 import libsmb_samba_internal as libsmb +from samba.netcmd import CommandError + +def get_gpo_dn(samdb, gpo): + """Construct the DN for gpo""" + + dn = samdb.get_default_basedn() + dn.add_child(ldb.Dn(samdb, "CN=Policies,CN=System")) + dn.add_child(ldb.Dn(samdb, "CN=%s" % gpo)) + return dn + +def create_directory_hier(conn, remotedir): + elems = remotedir.replace('/', '\\').split('\\') + path = "" + for e in elems: + path = path + '\\' + e + if not conn.chkpath(path): + conn.mkdir(path) + +def smb_connection(dc_hostname, service, lp, creds): + # SMB connect to DC + # Force signing for the smb connection + saved_signing_state = creds.get_smb_signing() + creds.set_smb_signing(SMB_SIGNING_REQUIRED) + try: + # the SMB bindings rely on having a s3 loadparm + s3_lp = s3param.get_context() + s3_lp.load(lp.configfile) + conn = libsmb.Conn(dc_hostname, service, lp=s3_lp, creds=creds) + except Exception: + raise CommandError("Error connecting to '%s' using SMB" % dc_hostname) + # Reset signing state + creds.set_smb_signing(saved_signing_state) + return conn diff --git a/python/samba/netcmd/gpo.py b/python/samba/netcmd/gpo.py new file mode 100644 index 0000000..ba55b2e --- /dev/null +++ b/python/samba/netcmd/gpo.py @@ -0,0 +1,4513 @@ +# implement samba_tool gpo commands +# +# Copyright Andrew Tridgell 2010 +# Copyright Amitay Isaacs 2011-2012 +# +# based on C implementation by Guenther Deschner and Wilco Baan Hofman +# +# This program is free software; you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation; either version 3 of the License, or +# (at your option) any later version. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with this program. If not, see <http://www.gnu.org/licenses/>.
+# +import os +import sys +import samba.getopt as options +import ldb +import re +import xml.etree.ElementTree as ET +import shutil +import tempfile + +from samba.auth import system_session +from samba.netcmd import ( + Command, + CommandError, + Option, + SuperCommand, +) +from samba.samdb import SamDB +from samba import dsdb +from samba.dcerpc import security +from samba.ndr import ndr_unpack, ndr_pack +from samba.dcerpc import preg +import samba.security +import samba.auth +from samba.auth import AUTH_SESSION_INFO_DEFAULT_GROUPS, AUTH_SESSION_INFO_AUTHENTICATED, AUTH_SESSION_INFO_SIMPLE_PRIVILEGES +from samba.netcmd.common import netcmd_finddc +from samba import policy +from samba.samba3 import libsmb_samba_internal as libsmb +from samba import NTSTATUSError +import uuid +from samba.ntacls import dsacl2fsacl +from samba.dcerpc import nbt +from samba.net import Net +from samba.gp_parse import GPParser, GPNoParserException, GPGeneralizeException +from samba.gp_parse.gp_pol import GPPolParser +from samba.gp_parse.gp_ini import ( + GPIniParser, + GPTIniParser, + GPFDeploy1IniParser, + GPScriptsIniParser +) +from samba.gp_parse.gp_csv import GPAuditCsvParser +from samba.gp_parse.gp_inf import GptTmplInfParser +from samba.gp_parse.gp_aas import GPAasParser +from samba import param +from samba.netcmd.common import attr_default +from samba.common import get_bytes, get_string +from configparser import ConfigParser +from io import StringIO, BytesIO +from samba.gp.vgp_files_ext import calc_mode, stat_from_mode +import hashlib +import json +from samba.registry import str_regtype +from samba.ntstatus import ( + NT_STATUS_OBJECT_NAME_INVALID, + NT_STATUS_OBJECT_NAME_NOT_FOUND, + NT_STATUS_OBJECT_PATH_NOT_FOUND, + NT_STATUS_OBJECT_NAME_COLLISION, + NT_STATUS_ACCESS_DENIED +) +from samba.netcmd.gpcommon import ( + create_directory_hier, + smb_connection, + get_gpo_dn +) +from samba.policies import RegistryGroupPolicies +from samba.dcerpc.misc import REG_MULTI_SZ +from samba.gp.gpclass import register_gp_extension, list_gp_extensions, \ + unregister_gp_extension + + +def gpo_flags_string(value): + """return gpo flags string""" + flags = policy.get_gpo_flags(value) + if not flags: + ret = 'NONE' + else: + ret = ' '.join(flags) + return ret + + +def gplink_options_string(value): + """return gplink options string""" + options = policy.get_gplink_options(value) + if not options: + ret = 'NONE' + else: + ret = ' '.join(options) + return ret + + +def parse_gplink(gplink): + """parse a gPLink into an array of dn and options""" + ret = [] + + if gplink.strip() == '': + return ret + + a = gplink.split(']') + for g in a: + if not g: + continue + d = g.split(';') + if len(d) != 2 or not d[0].startswith("[LDAP://"): + raise RuntimeError("Badly formed gPLink '%s'" % g) + ret.append({'dn': d[0][8:], 'options': int(d[1])}) + return ret + + +def encode_gplink(gplist): + """Encode an array of dn and options into gPLink string""" + ret = "".join("[LDAP://%s;%d]" % (g['dn'], g['options']) for g in gplist) + return ret + + +def dc_url(lp, creds, url=None, dc=None): + """If URL is not specified, return URL for writable DC. 
+ If dc is provided, use that to construct ldap URL""" + + if url is None: + if dc is None: + try: + dc = netcmd_finddc(lp, creds) + except Exception as e: + raise RuntimeError("Could not find a DC for domain", e) + url = 'ldap://' + dc + return url + + +def get_gpo_info(samdb, gpo=None, displayname=None, dn=None, + sd_flags=(security.SECINFO_OWNER | + security.SECINFO_GROUP | + security.SECINFO_DACL | + security.SECINFO_SACL)): + """Get GPO information using gpo, displayname or dn""" + + policies_dn = samdb.get_default_basedn() + policies_dn.add_child(ldb.Dn(samdb, "CN=Policies,CN=System")) + + base_dn = policies_dn + search_expr = "(objectClass=groupPolicyContainer)" + search_scope = ldb.SCOPE_ONELEVEL + + if gpo is not None: + search_expr = "(&(objectClass=groupPolicyContainer)(name=%s))" % ldb.binary_encode(gpo) + + if displayname is not None: + search_expr = "(&(objectClass=groupPolicyContainer)(displayname=%s))" % ldb.binary_encode(displayname) + + if dn is not None: + base_dn = dn + search_scope = ldb.SCOPE_BASE + + try: + msg = samdb.search(base=base_dn, scope=search_scope, + expression=search_expr, + attrs=['nTSecurityDescriptor', + 'versionNumber', + 'flags', + 'name', + 'displayName', + 'gPCFileSysPath', + 'gPCMachineExtensionNames', + 'gPCUserExtensionNames'], + controls=['sd_flags:1:%d' % sd_flags]) + except Exception as e: + if gpo is not None: + mesg = "Cannot get information for GPO %s" % gpo + else: + mesg = "Cannot get information for GPOs" + raise CommandError(mesg, e) + + return msg + + +def get_gpo_containers(samdb, gpo): + """lists dn of containers for a GPO""" + + search_expr = "(&(objectClass=*)(gPLink=*%s*))" % gpo + try: + msg = samdb.search(expression=search_expr, attrs=['gPLink']) + except Exception as e: + raise CommandError("Could not find container(s) with GPO %s" % gpo, e) + + return msg + + +def del_gpo_link(samdb, container_dn, gpo): + """delete GPO link for the container""" + # Check if valid Container DN and get existing GPlinks + try: + msg = samdb.search(base=container_dn, scope=ldb.SCOPE_BASE, + expression="(objectClass=*)", + attrs=['gPLink'])[0] + except Exception as e: + raise CommandError("Container '%s' does not exist" % container_dn, e) + + found = False + gpo_dn = str(get_gpo_dn(samdb, gpo)) + if 'gPLink' in msg: + gplist = parse_gplink(str(msg['gPLink'][0])) + for g in gplist: + if g['dn'].lower() == gpo_dn.lower(): + gplist.remove(g) + found = True + break + else: + raise CommandError("No GPO(s) linked to this container") + + if not found: + raise CommandError("GPO '%s' not linked to this container" % gpo) + + m = ldb.Message() + m.dn = container_dn + if gplist: + gplink_str = encode_gplink(gplist) + m['r0'] = ldb.MessageElement(gplink_str, ldb.FLAG_MOD_REPLACE, 'gPLink') + else: + m['d0'] = ldb.MessageElement(msg['gPLink'][0], ldb.FLAG_MOD_DELETE, 'gPLink') + try: + samdb.modify(m) + except Exception as e: + raise CommandError("Error removing GPO from container", e) + + +def parse_unc(unc): + """Parse UNC string into a hostname, a service, and a filepath""" + tmp = [] + if unc.startswith('\\\\'): + tmp = unc[2:].split('\\', 2) + elif unc.startswith('//'): + tmp = unc[2:].split('/', 2) + + if len(tmp) != 3: + raise ValueError("Invalid UNC string: %s" % unc) + + return tmp + + +def find_parser(name, flags=re.IGNORECASE): + if re.match(r'fdeploy1\.ini$', name, flags=flags): + return GPFDeploy1IniParser() + if re.match(r'audit\.csv$', name, flags=flags): + return GPAuditCsvParser() + if re.match(r'GptTmpl\.inf$', name, flags=flags): + return 
GptTmplInfParser() + if re.match(r'GPT\.INI$', name, flags=flags): + return GPTIniParser() + if re.match(r'scripts\.ini$', name, flags=flags): + return GPScriptsIniParser() + if re.match(r'psscripts\.ini$', name, flags=flags): + return GPScriptsIniParser() + if re.match(r'GPE\.INI$', name, flags=flags): + # This file does not appear in the protocol specifications! + # + # It appears to be a legacy file used to maintain gPCUserExtensionNames + # and gPCMachineExtensionNames. We should just copy the file as binary. + return GPParser() + if re.match(r'.*\.ini$', name, flags=flags): + return GPIniParser() + if re.match(r'.*\.pol$', name, flags=flags): + return GPPolParser() + if re.match(r'.*\.aas$', name, flags=flags): + return GPAasParser() + + return GPParser() + + +def backup_directory_remote_to_local(conn, remotedir, localdir): + SUFFIX = '.SAMBABACKUP' + if not os.path.isdir(localdir): + os.mkdir(localdir) + r_dirs = [ remotedir ] + l_dirs = [ localdir ] + while r_dirs: + r_dir = r_dirs.pop() + l_dir = l_dirs.pop() + + dirlist = conn.list(r_dir, attribs=attr_flags) + dirlist.sort(key=lambda x : x['name']) + for e in dirlist: + r_name = r_dir + '\\' + e['name'] + l_name = os.path.join(l_dir, e['name']) + + if e['attrib'] & libsmb.FILE_ATTRIBUTE_DIRECTORY: + r_dirs.append(r_name) + l_dirs.append(l_name) + os.mkdir(l_name) + else: + data = conn.loadfile(r_name) + with open(l_name + SUFFIX, 'wb') as f: + f.write(data) + + parser = find_parser(e['name']) + parser.parse(data) + parser.write_xml(l_name + '.xml') + + +attr_flags = libsmb.FILE_ATTRIBUTE_SYSTEM | \ + libsmb.FILE_ATTRIBUTE_DIRECTORY | \ + libsmb.FILE_ATTRIBUTE_ARCHIVE | \ + libsmb.FILE_ATTRIBUTE_HIDDEN + + +def copy_directory_remote_to_local(conn, remotedir, localdir): + if not os.path.isdir(localdir): + os.mkdir(localdir) + r_dirs = [remotedir] + l_dirs = [localdir] + while r_dirs: + r_dir = r_dirs.pop() + l_dir = l_dirs.pop() + + dirlist = conn.list(r_dir, attribs=attr_flags) + dirlist.sort(key=lambda x : x['name']) + for e in dirlist: + r_name = r_dir + '\\' + e['name'] + l_name = os.path.join(l_dir, e['name']) + + if e['attrib'] & libsmb.FILE_ATTRIBUTE_DIRECTORY: + r_dirs.append(r_name) + l_dirs.append(l_name) + os.mkdir(l_name) + else: + data = conn.loadfile(r_name) + open(l_name, 'wb').write(data) + + +def copy_directory_local_to_remote(conn, localdir, remotedir, + ignore_existing_dir=False, + keep_existing_files=False): + if not conn.chkpath(remotedir): + conn.mkdir(remotedir) + l_dirs = [localdir] + r_dirs = [remotedir] + while l_dirs: + l_dir = l_dirs.pop() + r_dir = r_dirs.pop() + + dirlist = os.listdir(l_dir) + dirlist.sort() + for e in dirlist: + l_name = os.path.join(l_dir, e) + r_name = r_dir + '\\' + e + + if os.path.isdir(l_name): + l_dirs.append(l_name) + r_dirs.append(r_name) + try: + conn.mkdir(r_name) + except NTSTATUSError: + if not ignore_existing_dir: + raise + else: + if keep_existing_files: + try: + conn.loadfile(r_name) + continue + except NTSTATUSError: + pass + + data = open(l_name, 'rb').read() + conn.savefile(r_name, data) + + +class GPOCommand(Command): + def construct_tmpdir(self, tmpdir, gpo): + """Ensure that the temporary directory structure used in fetch, + backup, create, and restore is consistent. + + If --tmpdir is used the named directory must be present, which may + contain a 'policy' subdirectory, but 'policy' must not itself have + a subdirectory with the gpo name. The policy and gpo directories + will be created. + + If --tmpdir is not used, a temporary directory is securely created. 
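+ + The resulting layout is {tmpdir}/policy/{gpo} (placeholders shown in + braces): the policy and gpo directories are created on demand, and a + pre-existing gpo directory is treated as an error rather than overwritten.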
+ """ + if tmpdir is None: + tmpdir = tempfile.mkdtemp() + print("Using temporary directory %s (use --tmpdir to change)" % tmpdir, + file=self.outf) + + if not os.path.isdir(tmpdir): + raise CommandError("Temporary directory '%s' does not exist" % tmpdir) + + localdir = os.path.join(tmpdir, "policy") + if not os.path.isdir(localdir): + os.mkdir(localdir) + + gpodir = os.path.join(localdir, gpo) + if os.path.isdir(gpodir): + raise CommandError( + "GPO directory '%s' already exists, refusing to overwrite" % gpodir) + + try: + os.mkdir(gpodir) + except (IOError, OSError) as e: + raise CommandError("Error creating teporary GPO directory", e) + + return tmpdir, gpodir + + def samdb_connect(self): + """make a ldap connection to the server""" + try: + self.samdb = SamDB(url=self.url, + session_info=system_session(), + credentials=self.creds, lp=self.lp) + except Exception as e: + raise CommandError("LDAP connection to %s failed " % self.url, e) + + +class cmd_listall(GPOCommand): + """List all GPOs.""" + + synopsis = "%prog [options]" + + takes_optiongroups = { + "sambaopts": options.SambaOptions, + "versionopts": options.VersionOptions, + "credopts": options.CredentialsOptions, + } + + takes_options = [ + Option("-H", "--URL", help="LDB URL for database or target server", type=str, + metavar="URL", dest="H") + ] + + def run(self, H=None, sambaopts=None, credopts=None, versionopts=None): + + self.lp = sambaopts.get_loadparm() + self.creds = credopts.get_credentials(self.lp, fallback_machine=True) + + self.url = dc_url(self.lp, self.creds, H) + + self.samdb_connect() + + msg = get_gpo_info(self.samdb, None) + + for m in msg: + self.outf.write("GPO : %s\n" % m['name'][0]) + self.outf.write("display name : %s\n" % m['displayName'][0]) + self.outf.write("path : %s\n" % m['gPCFileSysPath'][0]) + self.outf.write("dn : %s\n" % m.dn) + self.outf.write("version : %s\n" % attr_default(m, 'versionNumber', '0')) + self.outf.write("flags : %s\n" % gpo_flags_string(int(attr_default(m, 'flags', 0)))) + self.outf.write("\n") + + +class cmd_list(GPOCommand): + """List GPOs for an account.""" + + synopsis = "%prog [options]" + + takes_args = ['accountname'] + takes_optiongroups = { + "sambaopts": options.SambaOptions, + "versionopts": options.VersionOptions, + "credopts": options.CredentialsOptions, + } + + takes_options = [ + Option("-H", "--URL", help="LDB URL for database or target server", + type=str, metavar="URL", dest="H") + ] + + def run(self, accountname, H=None, sambaopts=None, credopts=None, versionopts=None): + + self.lp = sambaopts.get_loadparm() + self.creds = credopts.get_credentials(self.lp, fallback_machine=True) + + self.url = dc_url(self.lp, self.creds, H) + + self.samdb_connect() + + try: + msg = self.samdb.search(expression='(&(|(samAccountName=%s)(samAccountName=%s$))(objectClass=User))' % + (ldb.binary_encode(accountname), ldb.binary_encode(accountname))) + user_dn = msg[0].dn + except Exception: + raise CommandError("Failed to find account %s" % accountname) + + # check if its a computer account + try: + msg = self.samdb.search(base=user_dn, scope=ldb.SCOPE_BASE, attrs=['objectClass'])[0] + is_computer = 'computer' in msg['objectClass'] + except Exception: + raise CommandError("Failed to find objectClass for %s" % accountname) + + session_info_flags = (AUTH_SESSION_INFO_DEFAULT_GROUPS | + AUTH_SESSION_INFO_AUTHENTICATED) + + # When connecting to a remote server, don't look up the local privilege DB + if self.url is not None and self.url.startswith('ldap'): + session_info_flags |= 
AUTH_SESSION_INFO_SIMPLE_PRIVILEGES + + session = samba.auth.user_session(self.samdb, lp_ctx=self.lp, dn=user_dn, + session_info_flags=session_info_flags) + + token = session.security_token + + gpos = [] + + inherit = True + dn = ldb.Dn(self.samdb, str(user_dn)).parent() + while True: + msg = self.samdb.search(base=dn, scope=ldb.SCOPE_BASE, attrs=['gPLink', 'gPOptions'])[0] + if 'gPLink' in msg: + glist = parse_gplink(str(msg['gPLink'][0])) + for g in glist: + if not inherit and not (g['options'] & dsdb.GPLINK_OPT_ENFORCE): + continue + if g['options'] & dsdb.GPLINK_OPT_DISABLE: + continue + + try: + sd_flags = (security.SECINFO_OWNER | + security.SECINFO_GROUP | + security.SECINFO_DACL) + gmsg = self.samdb.search(base=g['dn'], scope=ldb.SCOPE_BASE, + attrs=['name', 'displayName', 'flags', + 'nTSecurityDescriptor'], + controls=['sd_flags:1:%d' % sd_flags]) + secdesc_ndr = gmsg[0]['nTSecurityDescriptor'][0] + secdesc = ndr_unpack(security.descriptor, secdesc_ndr) + except Exception: + self.outf.write("Failed to fetch gpo object with nTSecurityDescriptor %s\n" % + g['dn']) + continue + + try: + samba.security.access_check(secdesc, token, + security.SEC_STD_READ_CONTROL | + security.SEC_ADS_LIST | + security.SEC_ADS_READ_PROP) + except RuntimeError: + self.outf.write("Failed access check on %s\n" % msg.dn) + continue + + # check the flags on the GPO + flags = int(attr_default(gmsg[0], 'flags', 0)) + if is_computer and (flags & dsdb.GPO_FLAG_MACHINE_DISABLE): + continue + if not is_computer and (flags & dsdb.GPO_FLAG_USER_DISABLE): + continue + gpos.append((gmsg[0]['displayName'][0], gmsg[0]['name'][0])) + + # check if this blocks inheritance + gpoptions = int(attr_default(msg, 'gPOptions', 0)) + if gpoptions & dsdb.GPO_BLOCK_INHERITANCE: + inherit = False + + if dn == self.samdb.get_default_basedn(): + break + dn = dn.parent() + + if is_computer: + msg_str = 'computer' + else: + msg_str = 'user' + + self.outf.write("GPOs for %s %s\n" % (msg_str, accountname)) + for g in gpos: + self.outf.write(" %s %s\n" % (g[0], g[1])) + + +class cmd_show(GPOCommand): + """Show information for a GPO.""" + + synopsis = "%prog [options]" + + takes_optiongroups = { + "sambaopts": options.SambaOptions, + "versionopts": options.VersionOptions, + "credopts": options.CredentialsOptions, + } + + takes_args = ['gpo'] + + takes_options = [ + Option("-H", help="LDB URL for database or target server", type=str) + ] + + def run(self, gpo, H=None, sambaopts=None, credopts=None, versionopts=None): + + self.lp = sambaopts.get_loadparm() + self.creds = credopts.get_credentials(self.lp, fallback_machine=True) + + # We need to know writable DC to setup SMB connection + if H and H.startswith('ldap://'): + dc_hostname = H[7:] + self.url = H + else: + dc_hostname = netcmd_finddc(self.lp, self.creds) + self.url = dc_url(self.lp, self.creds, dc=dc_hostname) + + self.samdb_connect() + + try: + msg = get_gpo_info(self.samdb, gpo)[0] + except Exception: + raise CommandError("GPO '%s' does not exist" % gpo) + + try: + secdesc_ndr = msg['nTSecurityDescriptor'][0] + secdesc = ndr_unpack(security.descriptor, secdesc_ndr) + secdesc_sddl = secdesc.as_sddl() + except Exception: + secdesc_sddl = "" + + self.outf.write("GPO : %s\n" % msg['name'][0]) + self.outf.write("display name : %s\n" % msg['displayName'][0]) + self.outf.write("path : %s\n" % msg['gPCFileSysPath'][0]) + if 'gPCMachineExtensionNames' in msg: + self.outf.write("Machine Exts : %s\n" % msg['gPCMachineExtensionNames'][0]) + if 'gPCUserExtensionNames' in msg: + 
self.outf.write("User Exts : %s\n" % msg['gPCUserExtensionNames'][0]) + self.outf.write("dn : %s\n" % msg.dn) + self.outf.write("version : %s\n" % attr_default(msg, 'versionNumber', '0')) + self.outf.write("flags : %s\n" % gpo_flags_string(int(attr_default(msg, 'flags', 0)))) + self.outf.write("ACL : %s\n" % secdesc_sddl) + + # SMB connect to DC + conn = smb_connection(dc_hostname, + 'sysvol', + lp=self.lp, + creds=self.creds) + + realm = self.lp.get('realm') + pol_file = '\\'.join([realm.lower(), 'Policies', gpo, + '%s\\Registry.pol']) + policy_defs = [] + for policy_class in ['MACHINE', 'USER']: + try: + pol_data = ndr_unpack(preg.file, + conn.loadfile(pol_file % policy_class)) + except NTSTATUSError as e: + if e.args[0] in [NT_STATUS_OBJECT_NAME_INVALID, + NT_STATUS_OBJECT_NAME_NOT_FOUND, + NT_STATUS_OBJECT_PATH_NOT_FOUND]: + continue # The file doesn't exist, so there is nothing to list + if e.args[0] == NT_STATUS_ACCESS_DENIED: + raise CommandError("The authenticated user does " + "not have sufficient privileges") + raise + + for entry in pol_data.entries: + if entry.valuename == "**delvals.": + continue + defs = {} + defs['keyname'] = entry.keyname + defs['valuename'] = entry.valuename + defs['class'] = policy_class + defs['type'] = str_regtype(entry.type) + defs['data'] = entry.data + # Bytes aren't JSON serializable + if type(defs['data']) == bytes: + if entry.type == REG_MULTI_SZ: + data = defs['data'].decode('utf-16-le') + defs['data'] = data.rstrip('\x00').split('\x00') + else: + defs['data'] = list(defs['data']) + policy_defs.append(defs) + self.outf.write("Policies :\n") + json.dump(policy_defs, self.outf, indent=4) + self.outf.write("\n") + + +class cmd_load(GPOCommand): + """Load policies onto a GPO. + + Reads json from standard input until EOF, unless a json formatted + file is provided via --content. + + Example json_input: + [ + { + "keyname": "Software\\Policies\\Mozilla\\Firefox\\Homepage", + "valuename": "StartPage", + "class": "USER", + "type": "REG_SZ", + "data": "homepage" + }, + { + "keyname": "Software\\Policies\\Mozilla\\Firefox\\Homepage", + "valuename": "URL", + "class": "USER", + "type": "REG_SZ", + "data": "google.com" + }, + { + "keyname": "Software\\Microsoft\\Internet Explorer\\Toolbar", + "valuename": "IEToolbar", + "class": "USER", + "type": "REG_BINARY", + "data": [0] + }, + { + "keyname": "Software\\Policies\\Microsoft\\InputPersonalization", + "valuename": "RestrictImplicitTextCollection", + "class": "USER", + "type": "REG_DWORD", + "data": 1 + } + ] + + Valid class attributes: MACHINE|USER|BOTH + Data arrays are interpreted as bytes. + + The --machine-ext-name and --user-ext-name options are multi-value inputs + which respectively set the gPCMachineExtensionNames and gPCUserExtensionNames + ldap attributes on the GPO. These attributes must be set to the correct GUID + names for Windows Group Policy to work correctly. These GUIDs represent + the client side extensions to apply on the machine. Linux Group Policy does + not enforce this constraint. + {35378EAC-683F-11D2-A89A-00C04FBBCFA2} is provided by default, which + enables most Registry policies. 
+ """ + + synopsis = "%prog [options]" + + takes_optiongroups = { + "sambaopts": options.SambaOptions, + "versionopts": options.VersionOptions, + "credopts": options.CredentialsOptions, + } + + takes_args = ['gpo'] + + takes_options = [ + Option("-H", help="LDB URL for database or target server", type=str), + Option("--content", help="JSON file of policy inputs", type=str), + Option("--machine-ext-name", + action="append", dest="machine_exts", + default=['{35378EAC-683F-11D2-A89A-00C04FBBCFA2}'], + help="A machine extension name to add to gPCMachineExtensionNames"), + Option("--user-ext-name", + action="append", dest="user_exts", + default=['{35378EAC-683F-11D2-A89A-00C04FBBCFA2}'], + help="A user extension name to add to gPCUserExtensionNames"), + Option("--replace", action='store_true', default=False, + help="Replace the existing Group Policies, rather than merging") + ] + + def run(self, gpo, H=None, content=None, + machine_exts=None, + user_exts=None, + replace=False, sambaopts=None, credopts=None, versionopts=None): + if machine_exts is None: + machine_exts = ['{35378EAC-683F-11D2-A89A-00C04FBBCFA2}'] + if user_exts is None: + user_exts = ['{35378EAC-683F-11D2-A89A-00C04FBBCFA2}'] + if content is None: + policy_defs = json.loads(sys.stdin.read()) + elif os.path.exists(content): + with open(content, 'rb') as r: + policy_defs = json.load(r) + else: + raise CommandError("The JSON content file does not exist") + + self.lp = sambaopts.get_loadparm() + self.creds = credopts.get_credentials(self.lp, fallback_machine=True) + self.url = dc_url(self.lp, self.creds, H) + self.samdb_connect() + reg = RegistryGroupPolicies(gpo, self.lp, self.creds, self.samdb, H) + for ext_name in machine_exts: + reg.register_extension_name(ext_name, 'gPCMachineExtensionNames') + for ext_name in user_exts: + reg.register_extension_name(ext_name, 'gPCUserExtensionNames') + try: + if replace: + reg.replace_s(policy_defs) + else: + reg.merge_s(policy_defs) + except NTSTATUSError as e: + if e.args[0] == NT_STATUS_ACCESS_DENIED: + raise CommandError("The authenticated user does " + "not have sufficient privileges") + else: + raise + + +class cmd_remove(GPOCommand): + """Remove policies from a GPO. + + Reads json from standard input until EOF, unless a json formatted + file is provided via --content. 
+ + Example json_input: + [ + { + "keyname": "Software\\Policies\\Mozilla\\Firefox\\Homepage", + "valuename": "StartPage", + "class": "USER", + }, + { + "keyname": "Software\\Policies\\Mozilla\\Firefox\\Homepage", + "valuename": "URL", + "class": "USER", + }, + { + "keyname": "Software\\Microsoft\\Internet Explorer\\Toolbar", + "valuename": "IEToolbar", + "class": "USER" + }, + { + "keyname": "Software\\Policies\\Microsoft\\InputPersonalization", + "valuename": "RestrictImplicitTextCollection", + "class": "USER" + } + ] + + Valid class attributes: MACHINE|USER|BOTH + """ + + synopsis = "%prog [options]" + + takes_optiongroups = { + "sambaopts": options.SambaOptions, + "versionopts": options.VersionOptions, + "credopts": options.CredentialsOptions, + } + + takes_args = ['gpo'] + + takes_options = [ + Option("-H", help="LDB URL for database or target server", type=str), + Option("--content", help="JSON file of policy inputs", type=str), + Option("--machine-ext-name", + action="append", default=[], dest="machine_exts", + help="A machine extension name to remove from gPCMachineExtensionNames"), + Option("--user-ext-name", + action="append", default=[], dest="user_exts", + help="A user extension name to remove from gPCUserExtensionNames") + ] + + def run(self, gpo, H=None, content=None, machine_exts=None, user_exts=None, + sambaopts=None, credopts=None, versionopts=None): + if machine_exts is None: + machine_exts = [] + if user_exts is None: + user_exts = [] + if content is None: + policy_defs = json.loads(sys.stdin.read()) + elif os.path.exists(content): + with open(content, 'rb') as r: + policy_defs = json.load(r) + else: + raise CommandError("The JSON content file does not exist") + + self.lp = sambaopts.get_loadparm() + self.creds = credopts.get_credentials(self.lp, fallback_machine=True) + self.url = dc_url(self.lp, self.creds, H) + self.samdb_connect() + reg = RegistryGroupPolicies(gpo, self.lp, self.creds, self.samdb, H) + for ext_name in machine_exts: + reg.unregister_extension_name(ext_name, 'gPCMachineExtensionNames') + for ext_name in user_exts: + reg.unregister_extension_name(ext_name, 'gPCUserExtensionNames') + try: + reg.remove_s(policy_defs) + except NTSTATUSError as e: + if e.args[0] == NT_STATUS_ACCESS_DENIED: + raise CommandError("The authenticated user does " + "not have sufficient privileges") + else: + raise + + +class cmd_getlink(GPOCommand): + """List GPO Links for a container.""" + + synopsis = "%prog [options]" + + takes_optiongroups = { + "sambaopts": options.SambaOptions, + "versionopts": options.VersionOptions, + "credopts": options.CredentialsOptions, + } + + takes_args = ['container_dn'] + + takes_options = [ + Option("-H", help="LDB URL for database or target server", type=str) + ] + + def run(self, container_dn, H=None, sambaopts=None, credopts=None, + versionopts=None): + + self.lp = sambaopts.get_loadparm() + self.creds = credopts.get_credentials(self.lp, fallback_machine=True) + + self.url = dc_url(self.lp, self.creds, H) + + self.samdb_connect() + + try: + msg = self.samdb.search(base=container_dn, scope=ldb.SCOPE_BASE, + expression="(objectClass=*)", + attrs=['gPLink'])[0] + except Exception: + raise CommandError("Container '%s' does not exist" % container_dn) + + if 'gPLink' in msg and msg['gPLink']: + self.outf.write("GPO(s) linked to DN %s\n" % container_dn) + gplist = parse_gplink(str(msg['gPLink'][0])) + for g in gplist: + msg = get_gpo_info(self.samdb, dn=g['dn']) + self.outf.write(" GPO : %s\n" % msg[0]['name'][0]) + self.outf.write(" Name : %s\n" % 
msg[0]['displayName'][0]) + self.outf.write(" Options : %s\n" % gplink_options_string(g['options'])) + self.outf.write("\n") + else: + self.outf.write("No GPO(s) linked to DN=%s\n" % container_dn) + + +class cmd_setlink(GPOCommand): + """Add or update a GPO link to a container.""" + + synopsis = "%prog [options]" + + takes_optiongroups = { + "sambaopts": options.SambaOptions, + "versionopts": options.VersionOptions, + "credopts": options.CredentialsOptions, + } + + takes_args = ['container_dn', 'gpo'] + + takes_options = [ + Option("-H", help="LDB URL for database or target server", type=str), + Option("--disable", dest="disabled", default=False, action='store_true', + help="Disable policy"), + Option("--enforce", dest="enforced", default=False, action='store_true', + help="Enforce policy") + ] + + def run(self, container_dn, gpo, H=None, disabled=False, enforced=False, + sambaopts=None, credopts=None, versionopts=None): + + self.lp = sambaopts.get_loadparm() + self.creds = credopts.get_credentials(self.lp, fallback_machine=True) + + self.url = dc_url(self.lp, self.creds, H) + + self.samdb_connect() + + gplink_options = 0 + if disabled: + gplink_options |= dsdb.GPLINK_OPT_DISABLE + if enforced: + gplink_options |= dsdb.GPLINK_OPT_ENFORCE + + # Check if valid GPO DN + try: + get_gpo_info(self.samdb, gpo=gpo)[0] + except Exception: + raise CommandError("GPO '%s' does not exist" % gpo) + gpo_dn = str(get_gpo_dn(self.samdb, gpo)) + + # Check if valid Container DN + try: + msg = self.samdb.search(base=container_dn, scope=ldb.SCOPE_BASE, + expression="(objectClass=*)", + attrs=['gPLink'])[0] + except Exception: + raise CommandError("Container '%s' does not exist" % container_dn) + + # Update existing GPlinks or Add new one + existing_gplink = False + if 'gPLink' in msg: + gplist = parse_gplink(str(msg['gPLink'][0])) + existing_gplink = True + found = False + for g in gplist: + if g['dn'].lower() == gpo_dn.lower(): + g['options'] = gplink_options + found = True + break + if found: + raise CommandError("GPO '%s' already linked to this container" % gpo) + else: + gplist.insert(0, {'dn': gpo_dn, 'options': gplink_options}) + else: + gplist = [] + gplist.append({'dn': gpo_dn, 'options': gplink_options}) + + gplink_str = encode_gplink(gplist) + + m = ldb.Message() + m.dn = ldb.Dn(self.samdb, container_dn) + + if existing_gplink: + m['new_value'] = ldb.MessageElement(gplink_str, ldb.FLAG_MOD_REPLACE, 'gPLink') + else: + m['new_value'] = ldb.MessageElement(gplink_str, ldb.FLAG_MOD_ADD, 'gPLink') + + try: + self.samdb.modify(m) + except Exception as e: + raise CommandError("Error adding GPO Link", e) + + self.outf.write("Added/Updated GPO link\n") + cmd_getlink().run(container_dn, H, sambaopts, credopts, versionopts) + + +class cmd_dellink(GPOCommand): + """Delete GPO link from a container.""" + + synopsis = "%prog [options]" + + takes_optiongroups = { + "sambaopts": options.SambaOptions, + "versionopts": options.VersionOptions, + "credopts": options.CredentialsOptions, + } + + takes_args = ['container', 'gpo'] + + takes_options = [ + Option("-H", help="LDB URL for database or target server", type=str), + ] + + def run(self, container, gpo, H=None, sambaopts=None, credopts=None, + versionopts=None): + + self.lp = sambaopts.get_loadparm() + self.creds = credopts.get_credentials(self.lp, fallback_machine=True) + + self.url = dc_url(self.lp, self.creds, H) + + self.samdb_connect() + + # Check if valid GPO + try: + get_gpo_info(self.samdb, gpo=gpo)[0] + except Exception: + raise CommandError("GPO '%s' does 
not exist" % gpo) + + container_dn = ldb.Dn(self.samdb, container) + del_gpo_link(self.samdb, container_dn, gpo) + self.outf.write("Deleted GPO link.\n") + cmd_getlink().run(container_dn, H, sambaopts, credopts, versionopts) + + +class cmd_listcontainers(GPOCommand): + """List all linked containers for a GPO.""" + + synopsis = "%prog [options]" + + takes_optiongroups = { + "sambaopts": options.SambaOptions, + "versionopts": options.VersionOptions, + "credopts": options.CredentialsOptions, + } + + takes_args = ['gpo'] + + takes_options = [ + Option("-H", help="LDB URL for database or target server", type=str) + ] + + def run(self, gpo, H=None, sambaopts=None, credopts=None, + versionopts=None): + + self.lp = sambaopts.get_loadparm() + self.creds = credopts.get_credentials(self.lp, fallback_machine=True) + + self.url = dc_url(self.lp, self.creds, H) + + self.samdb_connect() + + msg = get_gpo_containers(self.samdb, gpo) + if len(msg): + self.outf.write("Container(s) using GPO %s\n" % gpo) + for m in msg: + self.outf.write(" DN: %s\n" % m['dn']) + else: + self.outf.write("No Containers using GPO %s\n" % gpo) + + +class cmd_getinheritance(GPOCommand): + """Get inheritance flag for a container.""" + + synopsis = "%prog [options]" + + takes_optiongroups = { + "sambaopts": options.SambaOptions, + "versionopts": options.VersionOptions, + "credopts": options.CredentialsOptions, + } + + takes_args = ['container_dn'] + + takes_options = [ + Option("-H", help="LDB URL for database or target server", type=str) + ] + + def run(self, container_dn, H=None, sambaopts=None, credopts=None, + versionopts=None): + + self.lp = sambaopts.get_loadparm() + self.creds = credopts.get_credentials(self.lp, fallback_machine=True) + + self.url = dc_url(self.lp, self.creds, H) + + self.samdb_connect() + + try: + msg = self.samdb.search(base=container_dn, scope=ldb.SCOPE_BASE, + expression="(objectClass=*)", + attrs=['gPOptions'])[0] + except Exception: + raise CommandError("Container '%s' does not exist" % container_dn) + + inheritance = 0 + if 'gPOptions' in msg: + inheritance = int(msg['gPOptions'][0]) + + if inheritance == dsdb.GPO_BLOCK_INHERITANCE: + self.outf.write("Container has GPO_BLOCK_INHERITANCE\n") + else: + self.outf.write("Container has GPO_INHERIT\n") + + +class cmd_setinheritance(GPOCommand): + """Set inheritance flag on a container.""" + + synopsis = "%prog [options]" + + takes_optiongroups = { + "sambaopts": options.SambaOptions, + "versionopts": options.VersionOptions, + "credopts": options.CredentialsOptions, + } + + takes_args = ['container_dn', 'inherit_state'] + + takes_options = [ + Option("-H", help="LDB URL for database or target server", type=str) + ] + + def run(self, container_dn, inherit_state, H=None, sambaopts=None, credopts=None, + versionopts=None): + + if inherit_state.lower() == 'block': + inheritance = dsdb.GPO_BLOCK_INHERITANCE + elif inherit_state.lower() == 'inherit': + inheritance = dsdb.GPO_INHERIT + else: + raise CommandError("Unknown inheritance state (%s)" % inherit_state) + + self.lp = sambaopts.get_loadparm() + self.creds = credopts.get_credentials(self.lp, fallback_machine=True) + + self.url = dc_url(self.lp, self.creds, H) + + self.samdb_connect() + try: + msg = self.samdb.search(base=container_dn, scope=ldb.SCOPE_BASE, + expression="(objectClass=*)", + attrs=['gPOptions'])[0] + except Exception: + raise CommandError("Container '%s' does not exist" % container_dn) + + m = ldb.Message() + m.dn = ldb.Dn(self.samdb, container_dn) + + if 'gPOptions' in msg: + m['new_value'] = 
ldb.MessageElement(str(inheritance), ldb.FLAG_MOD_REPLACE, 'gPOptions') + else: + m['new_value'] = ldb.MessageElement(str(inheritance), ldb.FLAG_MOD_ADD, 'gPOptions') + + try: + self.samdb.modify(m) + except Exception as e: + raise CommandError("Error setting inheritance state %s" % inherit_state, e) + + +class cmd_fetch(GPOCommand): + """Download a GPO.""" + + synopsis = "%prog [options]" + + takes_optiongroups = { + "sambaopts": options.SambaOptions, + "versionopts": options.VersionOptions, + "credopts": options.CredentialsOptions, + } + + takes_args = ['gpo'] + + takes_options = [ + Option("-H", help="LDB URL for database or target server", type=str), + Option("--tmpdir", help="Temporary directory for copying policy files", type=str) + ] + + def run(self, gpo, H=None, tmpdir=None, sambaopts=None, credopts=None, versionopts=None): + + self.lp = sambaopts.get_loadparm() + self.creds = credopts.get_credentials(self.lp, fallback_machine=True) + + # We need to know writable DC to setup SMB connection + if H and H.startswith('ldap://'): + dc_hostname = H[7:] + self.url = H + else: + dc_hostname = netcmd_finddc(self.lp, self.creds) + self.url = dc_url(self.lp, self.creds, dc=dc_hostname) + + self.samdb_connect() + try: + msg = get_gpo_info(self.samdb, gpo)[0] + except Exception: + raise CommandError("GPO '%s' does not exist" % gpo) + + # verify UNC path + unc = str(msg['gPCFileSysPath'][0]) + try: + [dom_name, service, sharepath] = parse_unc(unc) + except ValueError: + raise CommandError("Invalid GPO path (%s)" % unc) + + # SMB connect to DC + conn = smb_connection(dc_hostname, service, lp=self.lp, + creds=self.creds) + + # Copy GPT + tmpdir, gpodir = self.construct_tmpdir(tmpdir, gpo) + + try: + copy_directory_remote_to_local(conn, sharepath, gpodir) + except Exception as e: + # FIXME: Catch more specific exception + raise CommandError("Error copying GPO from DC", e) + self.outf.write('GPO copied to %s\n' % gpodir) + + +class cmd_backup(GPOCommand): + """Backup a GPO.""" + + synopsis = "%prog [options]" + + takes_optiongroups = { + "sambaopts": options.SambaOptions, + "versionopts": options.VersionOptions, + "credopts": options.CredentialsOptions, + } + + takes_args = ['gpo'] + + takes_options = [ + Option("-H", help="LDB URL for database or target server", type=str), + Option("--tmpdir", help="Temporary directory for copying policy files", type=str), + Option("--generalize", help="Generalize XML entities to restore", + default=False, action='store_true'), + Option("--entities", help="File to export defining XML entities for the restore", + dest='ent_file', type=str) + ] + + def run(self, gpo, H=None, tmpdir=None, generalize=False, sambaopts=None, + credopts=None, versionopts=None, ent_file=None): + + self.lp = sambaopts.get_loadparm() + self.creds = credopts.get_credentials(self.lp, fallback_machine=True) + + # We need to know writable DC to setup SMB connection + if H and H.startswith('ldap://'): + dc_hostname = H[7:] + self.url = H + else: + dc_hostname = netcmd_finddc(self.lp, self.creds) + self.url = dc_url(self.lp, self.creds, dc=dc_hostname) + + self.samdb_connect() + try: + msg = get_gpo_info(self.samdb, gpo)[0] + except Exception: + raise CommandError("GPO '%s' does not exist" % gpo) + + # verify UNC path + unc = str(msg['gPCFileSysPath'][0]) + try: + [dom_name, service, sharepath] = parse_unc(unc) + except ValueError: + raise CommandError("Invalid GPO path (%s)" % unc) + + # SMB connect to DC + conn = smb_connection(dc_hostname, service, lp=self.lp, + creds=self.creds) + + # Copy 
GPT
+        tmpdir, gpodir = self.construct_tmpdir(tmpdir, gpo)
+
+        try:
+            backup_directory_remote_to_local(conn, sharepath, gpodir)
+        except Exception as e:
+            # FIXME: Catch more specific exception
+            raise CommandError("Error copying GPO from DC", e)
+
+        self.outf.write('GPO copied to %s\n' % gpodir)
+
+        if generalize:
+            self.outf.write('\nAttempting to generalize XML entities:\n')
+            entities = cmd_backup.generalize_xml_entities(self.outf, gpodir,
+                                                          gpodir)
+            import operator
+            # Write one <!ENTITY name "value"> declaration per generalized value
+            ents = "".join('<!ENTITY {} "{}">\n'.format(ent[1].strip('&;'), ent[0]) \
+                           for ent in sorted(entities.items(), key=operator.itemgetter(1)))
+
+            if ent_file:
+                with open(ent_file, 'w') as f:
+                    f.write(ents)
+                self.outf.write('Entities successfully written to %s\n' %
+                                ent_file)
+            else:
+                self.outf.write('\nEntities:\n')
+                self.outf.write(ents)
+
+        # Backup the enabled GPO extension names
+        for ext in ('gPCMachineExtensionNames', 'gPCUserExtensionNames'):
+            if ext in msg:
+                with open(os.path.join(gpodir, ext + '.SAMBAEXT'), 'wb') as f:
+                    f.write(msg[ext][0])
+
+    @staticmethod
+    def generalize_xml_entities(outf, sourcedir, targetdir):
+        entities = {}
+
+        if not os.path.exists(targetdir):
+            os.mkdir(targetdir)
+
+        l_dirs = [ sourcedir ]
+        r_dirs = [ targetdir ]
+        while l_dirs:
+            l_dir = l_dirs.pop()
+            r_dir = r_dirs.pop()
+
+            dirlist = os.listdir(l_dir)
+            dirlist.sort()
+            for e in dirlist:
+                l_name = os.path.join(l_dir, e)
+                r_name = os.path.join(r_dir, e)
+
+                if os.path.isdir(l_name):
+                    l_dirs.append(l_name)
+                    r_dirs.append(r_name)
+                    if not os.path.exists(r_name):
+                        os.mkdir(r_name)
+                else:
+                    if l_name.endswith('.xml'):
+                        # Generalize the xml file if possible
+
+                        # Get the filename to find the parser
+                        to_parse = os.path.basename(l_name)[:-4]
+
+                        parser = find_parser(to_parse)
+                        try:
+                            with open(l_name, 'r') as ltemp:
+                                data = ltemp.read()
+
+                            concrete_xml = ET.fromstring(data)
+                            found_entities = parser.generalize_xml(concrete_xml, r_name, entities)
+                        except GPGeneralizeException:
+                            outf.write('SKIPPING: Generalizing failed for %s\n' % to_parse)
+
+                    else:
+                        # No need to generalize non-xml files.
+                        #
+                        # TODO This could be improved with xml files stored in
+                        # the renamed backup file (with custom extension) by
+                        # inlining them into the exported backups.
+                        if not os.path.samefile(l_name, r_name):
+                            shutil.copy2(l_name, r_name)
+
+        return entities
+
+
+class cmd_create(GPOCommand):
+    """Create an empty GPO."""
+
+    synopsis = "%prog <displayname> [options]"
+
+    takes_optiongroups = {
+        "sambaopts": options.SambaOptions,
+        "versionopts": options.VersionOptions,
+        "credopts": options.CredentialsOptions,
+    }
+
+    takes_args = ['displayname']
+
+    takes_options = [
+        Option("-H", help="LDB URL for database or target server", type=str),
+        Option("--tmpdir", help="Temporary directory for copying policy files", type=str)
+    ]
+
+    def run(self, displayname, H=None, tmpdir=None, sambaopts=None, credopts=None,
+            versionopts=None):
+
+        self.lp = sambaopts.get_loadparm()
+        self.creds = credopts.get_credentials(self.lp, fallback_machine=True)
+
+        net = Net(creds=self.creds, lp=self.lp)
+
+        # We need to know writable DC to setup SMB connection
+        if H and H.startswith('ldap://'):
+            dc_hostname = H[7:]
+            self.url = H
+            flags = (nbt.NBT_SERVER_LDAP |
+                     nbt.NBT_SERVER_DS |
+                     nbt.NBT_SERVER_WRITABLE)
+            cldap_ret = net.finddc(address=dc_hostname, flags=flags)
+        else:
+            flags = (nbt.NBT_SERVER_LDAP |
+                     nbt.NBT_SERVER_DS |
+                     nbt.NBT_SERVER_WRITABLE)
+            cldap_ret = net.finddc(domain=self.lp.get('realm'), flags=flags)
+            dc_hostname = cldap_ret.pdc_dns_name
+            self.url = dc_url(self.lp, self.creds, dc=dc_hostname)
+
+        self.samdb_connect()
+
+        msg = get_gpo_info(self.samdb, displayname=displayname)
+        if msg.count > 0:
+            raise CommandError("A GPO already exists with name '%s'" % displayname)
+
+        # Create new GUID
+        guid = str(uuid.uuid4())
+        gpo = "{%s}" % guid.upper()
+
+        self.gpo_name = gpo
+
+        realm = cldap_ret.dns_domain
+        unc_path = "\\\\%s\\sysvol\\%s\\Policies\\%s" % (realm, realm, gpo)
+
+        # Create GPT
+        self.tmpdir, gpodir = self.construct_tmpdir(tmpdir, gpo)
+        self.gpodir = gpodir
+
+        try:
+            os.mkdir(os.path.join(gpodir, "Machine"))
+            os.mkdir(os.path.join(gpodir, "User"))
+            gpt_contents = "[General]\r\nVersion=0\r\n"
+            open(os.path.join(gpodir, "GPT.INI"), "w").write(gpt_contents)
+        except Exception as e:
+            raise CommandError("Error Creating GPO files", e)
+
+        # Connect to DC over SMB
+        [dom_name, service, sharepath] = parse_unc(unc_path)
+        self.sharepath = sharepath
+        conn = smb_connection(dc_hostname, service, lp=self.lp,
+                              creds=self.creds)
+
+        self.conn = conn
+
+        self.samdb.transaction_start()
+        try:
+            # Add cn=<guid>
+            gpo_dn = get_gpo_dn(self.samdb, gpo)
+
+            m = ldb.Message()
+            m.dn = gpo_dn
+            m['a01'] = ldb.MessageElement("groupPolicyContainer", ldb.FLAG_MOD_ADD, "objectClass")
+            self.samdb.add(m)
+
+            # Add cn=User,cn=<guid>
+            m = ldb.Message()
+            m.dn = ldb.Dn(self.samdb, "CN=User,%s" % str(gpo_dn))
+            m['a01'] = ldb.MessageElement("container", ldb.FLAG_MOD_ADD, "objectClass")
+            self.samdb.add(m)
+
+            # Add cn=Machine,cn=<guid>
+            m = ldb.Message()
+            m.dn = ldb.Dn(self.samdb, "CN=Machine,%s" % str(gpo_dn))
+            m['a01'] = ldb.MessageElement("container", ldb.FLAG_MOD_ADD, "objectClass")
+            self.samdb.add(m)
+
+            # Get new security descriptor
+            ds_sd_flags = (security.SECINFO_OWNER |
+                           security.SECINFO_GROUP |
+                           security.SECINFO_DACL)
+            msg = get_gpo_info(self.samdb, gpo=gpo, sd_flags=ds_sd_flags)[0]
+            ds_sd_ndr = msg['nTSecurityDescriptor'][0]
+            ds_sd = ndr_unpack(security.descriptor, ds_sd_ndr).as_sddl()
+
+            # Create a file system security descriptor
+            domain_sid = security.dom_sid(self.samdb.get_domain_sid())
+            sddl = dsacl2fsacl(ds_sd, domain_sid)
+            fs_sd = security.descriptor.from_sddl(sddl, domain_sid)
+
+            # Copy GPO directory
+            create_directory_hier(conn, sharepath)
+
+            # Set ACL
+            sio = (security.SECINFO_OWNER |
+                   security.SECINFO_GROUP |
+                   security.SECINFO_DACL |
+                   security.SECINFO_PROTECTED_DACL)
+            conn.set_acl(sharepath, fs_sd, sio)
+
+            # Copy GPO files over SMB
+            copy_directory_local_to_remote(conn, gpodir, sharepath)
+
+            m = ldb.Message()
+            m.dn = gpo_dn
+            m['a02'] = ldb.MessageElement(displayname, ldb.FLAG_MOD_REPLACE, "displayName")
+            m['a03'] = ldb.MessageElement(unc_path, ldb.FLAG_MOD_REPLACE, "gPCFileSysPath")
+            m['a05'] = ldb.MessageElement("0", ldb.FLAG_MOD_REPLACE, "versionNumber")
+            m['a07'] = ldb.MessageElement("2", ldb.FLAG_MOD_REPLACE, "gpcFunctionalityVersion")
+            m['a04'] = ldb.MessageElement("0", ldb.FLAG_MOD_REPLACE, "flags")
+            controls = ["permissive_modify:0"]
+            self.samdb.modify(m, controls=controls)
+        except Exception:
+            self.samdb.transaction_cancel()
+            raise
+        else:
+            self.samdb.transaction_commit()
+
+        if tmpdir is None:
+            # Without --tmpdir, we created one in /tmp/. It must go.
+            shutil.rmtree(self.tmpdir)
+
+        self.outf.write("GPO '%s' created as %s\n" % (displayname, gpo))
+
+
+class cmd_restore(cmd_create):
+    """Restore a GPO to a new container."""
+
+    synopsis = "%prog <displayname> <backup> [options]"
+
+    takes_optiongroups = {
+        "sambaopts": options.SambaOptions,
+        "versionopts": options.VersionOptions,
+        "credopts": options.CredentialsOptions,
+    }
+
+    takes_args = ['displayname', 'backup']
+
+    takes_options = [
+        Option("-H", help="LDB URL for database or target server", type=str),
+        Option("--tmpdir", help="Temporary directory for copying policy files", type=str),
+        Option("--entities", help="File defining XML entities to insert into DOCTYPE header", type=str),
+        Option("--restore-metadata", help="Keep the old GPT.INI file and associated version number",
+               default=False, action="store_true")
+    ]
+
+    def restore_from_backup_to_local_dir(self, sourcedir, targetdir, dtd_header=''):
+        SUFFIX = '.SAMBABACKUP'
+
+        if not os.path.exists(targetdir):
+            os.mkdir(targetdir)
+
+        l_dirs = [ sourcedir ]
+        r_dirs = [ targetdir ]
+        while l_dirs:
+            l_dir = l_dirs.pop()
+            r_dir = r_dirs.pop()
+
+            dirlist = os.listdir(l_dir)
+            dirlist.sort()
+            for e in dirlist:
+                l_name = os.path.join(l_dir, e)
+                r_name = os.path.join(r_dir, e)
+
+                if os.path.isdir(l_name):
+                    l_dirs.append(l_name)
+                    r_dirs.append(r_name)
+                    if not os.path.exists(r_name):
+                        os.mkdir(r_name)
+                else:
+                    if l_name.endswith('.xml'):
+                        # Restore the xml file if possible
+
+                        # Get the filename to find the parser
+                        to_parse = os.path.basename(l_name)[:-4]
+
+                        parser = find_parser(to_parse)
+                        try:
+                            with open(l_name, 'r') as ltemp:
+                                data = ltemp.read()
+                            xml_head = '<?xml version="1.0" encoding="utf-8"?>'
+
+                            if data.startswith(xml_head):
+                                # It appears that sometimes the DTD rejects
+                                # the xml header being after it.
+                                data = data[len(xml_head):]
+
+                                # Load the XML file with the DTD (entity) header
+                                parser.load_xml(ET.fromstring(xml_head + dtd_header + data))
+                            else:
+                                parser.load_xml(ET.fromstring(dtd_header + data))
+
+                            # Write out the substituted files in the output
+                            # location, ready to copy over.
+                            parser.write_binary(r_name[:-4])
+
+                        except GPNoParserException:
+                            # In the failure case, we fallback
+                            original_file = l_name[:-4] + SUFFIX
+                            shutil.copy2(original_file, r_name[:-4])
+
+                            self.outf.write('WARNING: No such parser for %s\n' % to_parse)
+                            self.outf.write('WARNING: Falling back to simple copy-restore.\n')
+                        except:
+                            import traceback
+                            traceback.print_exc()
+
+                            # In the failure case, we fallback
+                            original_file = l_name[:-4] + SUFFIX
+                            shutil.copy2(original_file, r_name[:-4])
+
+                            self.outf.write('WARNING: Error during parsing for %s\n' % l_name)
+                            self.outf.write('WARNING: Falling back to simple copy-restore.\n')
+
+    def run(self, displayname, backup, H=None, tmpdir=None, entities=None, sambaopts=None, credopts=None,
+            versionopts=None, restore_metadata=None):
+
+        dtd_header = ''
+
+        if not os.path.exists(backup):
+            raise CommandError("Backup directory does not exist %s" % backup)
+
+        if entities is not None:
+            # DOCTYPE name is meant to match root element, but ElementTree does
+            # not seem to care, so this seems to be enough.
+
+            dtd_header = '<!DOCTYPE foobar [\n'
+
+            if not os.path.exists(entities):
+                raise CommandError("Entities file does not exist %s" %
+                                   entities)
+
+            with open(entities, 'r') as entities_file:
+                entities_content = entities_file.read()
+
+                # Do a basic regex test of the entities file format
+                if re.match(r'(\s*<!ENTITY\s*[a-zA-Z0-9_]+\s*.*?>)+\s*\Z',
+                            entities_content, flags=re.MULTILINE) is None:
+                    raise CommandError("Entities file does not appear to "
+                                       "conform to format\n"
+                                       'e.g. <!ENTITY entity "value">')
+                dtd_header += entities_content.strip()
+
+            dtd_header += '\n]>\n'
+
+        super().run(displayname, H, tmpdir, sambaopts, credopts, versionopts)
+
+        try:
+            if tmpdir is None:
+                # Create GPT
+                self.tmpdir, gpodir = self.construct_tmpdir(tmpdir, self.gpo_name)
+                self.gpodir = gpodir
+
+            # Iterate over backup files and restore with DTD
+            self.restore_from_backup_to_local_dir(backup, self.gpodir,
+                                                  dtd_header)
+
+            keep_new_files = not restore_metadata
+
+            # Copy GPO files over SMB
+            copy_directory_local_to_remote(self.conn, self.gpodir,
+                                           self.sharepath,
+                                           ignore_existing_dir=True,
+                                           keep_existing_files=keep_new_files)
+
+            gpo_dn = get_gpo_dn(self.samdb, self.gpo_name)
+
+            # Restore the enabled extensions
+            for ext in ('gPCMachineExtensionNames', 'gPCUserExtensionNames'):
+                ext_file = os.path.join(backup, ext + '.SAMBAEXT')
+                if os.path.exists(ext_file):
+                    with open(ext_file, 'rb') as f:
+                        data = f.read()
+
+                    m = ldb.Message()
+                    m.dn = gpo_dn
+                    m[ext] = ldb.MessageElement(data, ldb.FLAG_MOD_REPLACE,
+                                                ext)
+
+                    self.samdb.modify(m)
+
+            if tmpdir is None:
+                # Without --tmpdir, we created one in /tmp/. It must go.
+ shutil.rmtree(self.tmpdir) + + except Exception as e: + import traceback + traceback.print_exc() + self.outf.write(str(e) + '\n') + + self.outf.write("Failed to restore GPO -- deleting...\n") + cmd = cmd_del() + cmd.run(self.gpo_name, H, sambaopts, credopts, versionopts) + + raise CommandError("Failed to restore: %s" % e) + + +class cmd_del(GPOCommand): + """Delete a GPO.""" + + synopsis = "%prog [options]" + + takes_optiongroups = { + "sambaopts": options.SambaOptions, + "versionopts": options.VersionOptions, + "credopts": options.CredentialsOptions, + } + + takes_args = ['gpo'] + + takes_options = [ + Option("-H", help="LDB URL for database or target server", type=str), + ] + + def run(self, gpo, H=None, sambaopts=None, credopts=None, + versionopts=None): + + self.lp = sambaopts.get_loadparm() + self.creds = credopts.get_credentials(self.lp, fallback_machine=True) + + # We need to know writable DC to setup SMB connection + if H and H.startswith('ldap://'): + dc_hostname = H[7:] + self.url = H + else: + dc_hostname = netcmd_finddc(self.lp, self.creds) + self.url = dc_url(self.lp, self.creds, dc=dc_hostname) + + self.samdb_connect() + + # Check if valid GPO + try: + msg = get_gpo_info(self.samdb, gpo=gpo)[0] + unc_path = str(msg['gPCFileSysPath'][0]) + except Exception: + raise CommandError("GPO '%s' does not exist" % gpo) + + # Connect to DC over SMB + [dom_name, service, sharepath] = parse_unc(unc_path) + conn = smb_connection(dc_hostname, service, lp=self.lp, + creds=self.creds) + + self.samdb.transaction_start() + try: + # Check for existing links + msg = get_gpo_containers(self.samdb, gpo) + + if len(msg): + self.outf.write("GPO %s is linked to containers\n" % gpo) + for m in msg: + del_gpo_link(self.samdb, m['dn'], gpo) + self.outf.write(" Removed link from %s.\n" % m['dn']) + + # Remove LDAP entries + gpo_dn = get_gpo_dn(self.samdb, gpo) + self.samdb.delete(ldb.Dn(self.samdb, "CN=User,%s" % str(gpo_dn))) + self.samdb.delete(ldb.Dn(self.samdb, "CN=Machine,%s" % str(gpo_dn))) + self.samdb.delete(gpo_dn) + + # Remove GPO files + conn.deltree(sharepath) + + except Exception: + self.samdb.transaction_cancel() + raise + else: + self.samdb.transaction_commit() + + self.outf.write("GPO %s deleted.\n" % gpo) + + +class cmd_aclcheck(GPOCommand): + """Check all GPOs have matching LDAP and DS ACLs.""" + + synopsis = "%prog [options]" + + takes_optiongroups = { + "sambaopts": options.SambaOptions, + "versionopts": options.VersionOptions, + "credopts": options.CredentialsOptions, + } + + takes_options = [ + Option("-H", "--URL", help="LDB URL for database or target server", type=str, + metavar="URL", dest="H") + ] + + def run(self, H=None, sambaopts=None, credopts=None, versionopts=None): + + self.lp = sambaopts.get_loadparm() + self.creds = credopts.get_credentials(self.lp, fallback_machine=True) + + self.url = dc_url(self.lp, self.creds, H) + + # We need to know writable DC to setup SMB connection + if H and H.startswith('ldap://'): + dc_hostname = H[7:] + self.url = H + else: + dc_hostname = netcmd_finddc(self.lp, self.creds) + self.url = dc_url(self.lp, self.creds, dc=dc_hostname) + + self.samdb_connect() + + msg = get_gpo_info(self.samdb, None) + + for m in msg: + # verify UNC path + unc = str(m['gPCFileSysPath'][0]) + try: + [dom_name, service, sharepath] = parse_unc(unc) + except ValueError: + raise CommandError("Invalid GPO path (%s)" % unc) + + # SMB connect to DC + conn = smb_connection(dc_hostname, service, lp=self.lp, + creds=self.creds) + + fs_sd = conn.get_acl(sharepath, 
security.SECINFO_OWNER | security.SECINFO_GROUP | security.SECINFO_DACL, security.SEC_FLAG_MAXIMUM_ALLOWED) + + if 'nTSecurityDescriptor' not in m: + raise CommandError("Could not read nTSecurityDescriptor. " + "This requires an Administrator account") + + ds_sd_ndr = m['nTSecurityDescriptor'][0] + ds_sd = ndr_unpack(security.descriptor, ds_sd_ndr).as_sddl() + + # Create a file system security descriptor + domain_sid = security.dom_sid(self.samdb.get_domain_sid()) + expected_fs_sddl = dsacl2fsacl(ds_sd, domain_sid) + + if (fs_sd.as_sddl(domain_sid) != expected_fs_sddl): + raise CommandError("Invalid GPO ACL %s on path (%s), should be %s" % (fs_sd.as_sddl(domain_sid), sharepath, expected_fs_sddl)) + +class cmd_admxload(Command): + """Loads samba admx files to sysvol""" + + synopsis = "%prog [options]" + + takes_optiongroups = { + "sambaopts": options.SambaOptions, + "versionopts": options.VersionOptions, + "credopts": options.CredentialsOptions, + } + + takes_options = [ + Option("-H", "--URL", help="LDB URL for database or target server", type=str, + metavar="URL", dest="H"), + Option("--admx-dir", help="Directory where admx templates are stored", + type=str, default=os.path.join(param.data_dir(), 'samba/admx')) + ] + + def run(self, H=None, sambaopts=None, credopts=None, versionopts=None, + admx_dir=None): + self.lp = sambaopts.get_loadparm() + self.creds = credopts.get_credentials(self.lp, fallback_machine=True) + + # We need to know writable DC to setup SMB connection + if H and H.startswith('ldap://'): + dc_hostname = H[7:] + self.url = H + else: + dc_hostname = netcmd_finddc(self.lp, self.creds) + self.url = dc_url(self.lp, self.creds, dc=dc_hostname) + + # SMB connect to DC + conn = smb_connection(dc_hostname, + 'sysvol', + lp=self.lp, + creds=self.creds) + + smb_dir = '\\'.join([self.lp.get('realm').lower(), + 'Policies', 'PolicyDefinitions']) + try: + conn.mkdir(smb_dir) + except NTSTATUSError as e: + if e.args[0] == NT_STATUS_ACCESS_DENIED: + raise CommandError("The authenticated user does " + "not have sufficient privileges") + elif e.args[0] != NT_STATUS_OBJECT_NAME_COLLISION: + raise + + for dirname, dirs, files in os.walk(admx_dir): + for fname in files: + path_in_admx = dirname.replace(admx_dir, '') + full_path = os.path.join(dirname, fname) + sub_dir = '\\'.join([smb_dir, path_in_admx]).replace('/', '\\') + smb_path = '\\'.join([sub_dir, fname]) + try: + create_directory_hier(conn, sub_dir) + except NTSTATUSError as e: + if e.args[0] == NT_STATUS_ACCESS_DENIED: + raise CommandError("The authenticated user does " + "not have sufficient privileges") + elif e.args[0] != NT_STATUS_OBJECT_NAME_COLLISION: + raise + with open(full_path, 'rb') as f: + try: + conn.savefile(smb_path, f.read()) + except NTSTATUSError as e: + if e.args[0] == NT_STATUS_ACCESS_DENIED: + raise CommandError("The authenticated user does " + "not have sufficient privileges") + self.outf.write('Installing ADMX templates to the Central Store ' + 'prevents Windows from displaying its own templates ' + 'in the Group Policy Management Console. You will ' + 'need to install these templates ' + 'from https://www.microsoft.com/en-us/download/102157 ' + 'to continue using Windows Administrative Templates.\n') + +class cmd_add_sudoers(GPOCommand): + """Adds a Samba Sudoers Group Policy to the sysvol + +This command adds a sudo rule to the sysvol for applying to winbind clients. + +The command argument indicates the final field in the sudo rule. +The user argument indicates the user specified in the parentheses. 
+The users and groups arguments are comma separated lists, which are combined to +form the first field in the sudo rule. +The --passwd argument specifies whether the sudo entry will require a password +be specified. The default is False, meaning the NOPASSWD field will be +specified in the sudo entry. + +Example: +samba-tool gpo manage sudoers add {31B2F340-016D-11D2-945F-00C04FB984F9} ALL ALL fakeu fakeg + +The example command will generate the following sudoers entry: +fakeu,fakeg% ALL=(ALL) NOPASSWD: ALL + """ + + synopsis = "%prog [groups] [options]" + + takes_optiongroups = { + "sambaopts": options.SambaOptions, + "versionopts": options.VersionOptions, + "credopts": options.CredentialsOptions, + } + + takes_options = [ + Option("-H", "--URL", help="LDB URL for database or target server", type=str, + metavar="URL", dest="H"), + Option("--passwd", action='store_true', default=False, + help="Specify to indicate that sudo entry must provide a password") + ] + + takes_args = ["gpo", "command", "user", "users", "groups?"] + + def run(self, gpo, command, user, users, groups=None, passwd=None, + H=None, sambaopts=None, credopts=None, versionopts=None): + self.lp = sambaopts.get_loadparm() + self.creds = credopts.get_credentials(self.lp, fallback_machine=True) + + # We need to know writable DC to setup SMB connection + if H and H.startswith('ldap://'): + dc_hostname = H[7:] + self.url = H + else: + dc_hostname = netcmd_finddc(self.lp, self.creds) + self.url = dc_url(self.lp, self.creds, dc=dc_hostname) + + # SMB connect to DC + conn = smb_connection(dc_hostname, + 'sysvol', + lp=self.lp, + creds=self.creds) + + self.samdb_connect() + reg = RegistryGroupPolicies(gpo, self.lp, self.creds, self.samdb, H) + + realm = self.lp.get('realm') + vgp_dir = '\\'.join([realm.lower(), 'Policies', gpo, + 'MACHINE\\VGP\\VTLA\\Sudo', + 'SudoersConfiguration']) + vgp_xml = '\\'.join([vgp_dir, 'manifest.xml']) + try: + xml_data = ET.ElementTree(ET.fromstring(conn.loadfile(vgp_xml))) + policysetting = xml_data.getroot().find('policysetting') + data = policysetting.find('data') + except NTSTATUSError as e: + if e.args[0] in [NT_STATUS_OBJECT_NAME_INVALID, + NT_STATUS_OBJECT_NAME_NOT_FOUND, + NT_STATUS_OBJECT_PATH_NOT_FOUND]: + # The file doesn't exist, so create the xml structure + xml_data = ET.ElementTree(ET.Element('vgppolicy')) + policysetting = ET.SubElement(xml_data.getroot(), + 'policysetting') + pv = ET.SubElement(policysetting, 'version') + pv.text = '1' + name = ET.SubElement(policysetting, 'name') + name.text = 'Sudo Policy' + description = ET.SubElement(policysetting, 'description') + description.text = 'Sudoers File Configuration Policy' + apply_mode = ET.SubElement(policysetting, 'apply_mode') + apply_mode.text = 'merge' + data = ET.SubElement(policysetting, 'data') + load_plugin = ET.SubElement(data, 'load_plugin') + load_plugin.text = 'true' + elif e.args[0] == NT_STATUS_ACCESS_DENIED: + raise CommandError("The authenticated user does " + "not have sufficient privileges") + else: + raise + + sudoers_entry = ET.SubElement(data, 'sudoers_entry') + if passwd: + ET.SubElement(sudoers_entry, 'password') + command_elm = ET.SubElement(sudoers_entry, 'command') + command_elm.text = command + user_elm = ET.SubElement(sudoers_entry, 'user') + user_elm.text = user + listelement = ET.SubElement(sudoers_entry, 'listelement') + for u in users.split(','): + principal = ET.SubElement(listelement, 'principal') + principal.text = u + principal.attrib['type'] = 'user' + if groups is not None: + for g in 
groups.split(): + principal = ET.SubElement(listelement, 'principal') + principal.text = g + principal.attrib['type'] = 'group' + + out = BytesIO() + xml_data.write(out, encoding='UTF-8', xml_declaration=True) + out.seek(0) + try: + create_directory_hier(conn, vgp_dir) + conn.savefile(vgp_xml, out.read()) + reg.increment_gpt_ini(machine_changed=True) + except NTSTATUSError as e: + if e.args[0] == NT_STATUS_ACCESS_DENIED: + raise CommandError("The authenticated user does " + "not have sufficient privileges") + raise + +class cmd_list_sudoers(Command): + """List Samba Sudoers Group Policy from the sysvol + +This command lists sudo rules from the sysvol that will be applied to winbind clients. + +Example: +samba-tool gpo manage sudoers list {31B2F340-016D-11D2-945F-00C04FB984F9} + """ + + synopsis = "%prog [options]" + + takes_optiongroups = { + "sambaopts": options.SambaOptions, + "versionopts": options.VersionOptions, + "credopts": options.CredentialsOptions, + } + + takes_options = [ + Option("-H", "--URL", help="LDB URL for database or target server", type=str, + metavar="URL", dest="H"), + ] + + takes_args = ["gpo"] + + def run(self, gpo, H=None, sambaopts=None, credopts=None, versionopts=None): + self.lp = sambaopts.get_loadparm() + self.creds = credopts.get_credentials(self.lp, fallback_machine=True) + + # We need to know writable DC to setup SMB connection + if H and H.startswith('ldap://'): + dc_hostname = H[7:] + self.url = H + else: + dc_hostname = netcmd_finddc(self.lp, self.creds) + self.url = dc_url(self.lp, self.creds, dc=dc_hostname) + + # SMB connect to DC + conn = smb_connection(dc_hostname, + 'sysvol', + lp=self.lp, + creds=self.creds) + + realm = self.lp.get('realm') + vgp_xml = '\\'.join([realm.lower(), 'Policies', gpo, + 'MACHINE\\VGP\\VTLA\\Sudo', + 'SudoersConfiguration\\manifest.xml']) + try: + xml_data = ET.fromstring(conn.loadfile(vgp_xml)) + except NTSTATUSError as e: + if e.args[0] in [NT_STATUS_OBJECT_NAME_INVALID, + NT_STATUS_OBJECT_NAME_NOT_FOUND, + NT_STATUS_OBJECT_PATH_NOT_FOUND]: + # The file doesn't exist, so there is nothing to list + xml_data = None + elif e.args[0] == NT_STATUS_ACCESS_DENIED: + raise CommandError("The authenticated user does " + "not have sufficient privileges") + else: + raise + + if xml_data is not None: + policy = xml_data.find('policysetting') + data = policy.find('data') + for entry in data.findall('sudoers_entry'): + command = entry.find('command').text + user = entry.find('user').text + listelements = entry.findall('listelement') + principals = [] + for listelement in listelements: + principals.extend(listelement.findall('principal')) + if len(principals) > 0: + uname = ','.join([u.text if u.attrib['type'] == 'user' \ + else '%s%%' % u.text for u in principals]) + else: + uname = 'ALL' + nopassword = entry.find('password') is None + np_entry = ' NOPASSWD:' if nopassword else '' + p = '%s ALL=(%s)%s %s' % (uname, user, np_entry, command) + self.outf.write('%s\n' % p) + + pol_file = '\\'.join([realm.lower(), 'Policies', gpo, + 'MACHINE\\Registry.pol']) + try: + pol_data = ndr_unpack(preg.file, conn.loadfile(pol_file)) + except NTSTATUSError as e: + if e.args[0] in [NT_STATUS_OBJECT_NAME_INVALID, + NT_STATUS_OBJECT_NAME_NOT_FOUND, + NT_STATUS_OBJECT_PATH_NOT_FOUND]: + return # The file doesn't exist, so there is nothing to list + if e.args[0] == NT_STATUS_ACCESS_DENIED: + raise CommandError("The authenticated user does " + "not have sufficient privileges") + raise + + # Also list the policies set from the GPME + keyname = 
b'Software\\Policies\\Samba\\Unix Settings\\Sudo Rights' + for entry in pol_data.entries: + if get_bytes(entry.keyname) == keyname and \ + get_string(entry.data).strip(): + self.outf.write('%s\n' % entry.data) + +class cmd_remove_sudoers(GPOCommand): + """Removes a Samba Sudoers Group Policy from the sysvol + +This command removes a sudo rule from the sysvol from applying to winbind clients. + +Example: +samba-tool gpo manage sudoers remove {31B2F340-016D-11D2-945F-00C04FB984F9} 'fakeu ALL=(ALL) NOPASSWD: ALL' + """ + + synopsis = "%prog [options]" + + takes_optiongroups = { + "sambaopts": options.SambaOptions, + "versionopts": options.VersionOptions, + "credopts": options.CredentialsOptions, + } + + takes_options = [ + Option("-H", "--URL", help="LDB URL for database or target server", type=str, + metavar="URL", dest="H"), + ] + + takes_args = ["gpo", "entry"] + + def run(self, gpo, entry, H=None, sambaopts=None, credopts=None, versionopts=None): + self.lp = sambaopts.get_loadparm() + self.creds = credopts.get_credentials(self.lp, fallback_machine=True) + + # We need to know writable DC to setup SMB connection + if H and H.startswith('ldap://'): + dc_hostname = H[7:] + self.url = H + else: + dc_hostname = netcmd_finddc(self.lp, self.creds) + self.url = dc_url(self.lp, self.creds, dc=dc_hostname) + + # SMB connect to DC + conn = smb_connection(dc_hostname, + 'sysvol', + lp=self.lp, + creds=self.creds) + + self.samdb_connect() + reg = RegistryGroupPolicies(gpo, self.lp, self.creds, self.samdb, H) + + realm = self.lp.get('realm') + vgp_dir = '\\'.join([realm.lower(), 'Policies', gpo, + 'MACHINE\\VGP\\VTLA\\Sudo', + 'SudoersConfiguration']) + vgp_xml = '\\'.join([vgp_dir, 'manifest.xml']) + try: + xml_data = ET.ElementTree(ET.fromstring(conn.loadfile(vgp_xml))) + policysetting = xml_data.getroot().find('policysetting') + data = policysetting.find('data') + except NTSTATUSError as e: + if e.args[0] in [NT_STATUS_OBJECT_NAME_INVALID, + NT_STATUS_OBJECT_NAME_NOT_FOUND, + NT_STATUS_OBJECT_PATH_NOT_FOUND]: + data = None + elif e.args[0] == NT_STATUS_ACCESS_DENIED: + raise CommandError("The authenticated user does " + "not have sufficient privileges") + else: + raise + + pol_file = '\\'.join([realm.lower(), 'Policies', gpo, + 'MACHINE\\Registry.pol']) + try: + pol_data = ndr_unpack(preg.file, conn.loadfile(pol_file)) + except NTSTATUSError as e: + if e.args[0] in [NT_STATUS_OBJECT_NAME_INVALID, + NT_STATUS_OBJECT_NAME_NOT_FOUND, + NT_STATUS_OBJECT_PATH_NOT_FOUND]: + pol_data = None + elif e.args[0] == NT_STATUS_ACCESS_DENIED: + raise CommandError("The authenticated user does " + "not have sufficient privileges") + else: + raise + + entries = {} + for e in data.findall('sudoers_entry') if data else []: + command = e.find('command').text + user = e.find('user').text + listelements = e.findall('listelement') + principals = [] + for listelement in listelements: + principals.extend(listelement.findall('principal')) + if len(principals) > 0: + uname = ','.join([u.text if u.attrib['type'] == 'user' \ + else '%s%%' % u.text for u in principals]) + else: + uname = 'ALL' + nopassword = e.find('password') is None + np_entry = ' NOPASSWD:' if nopassword else '' + p = '%s ALL=(%s)%s %s' % (uname, user, np_entry, command) + entries[p] = e + + if entry in entries.keys(): + data.remove(entries[entry]) + + out = BytesIO() + xml_data.write(out, encoding='UTF-8', xml_declaration=True) + out.seek(0) + try: + create_directory_hier(conn, vgp_dir) + conn.savefile(vgp_xml, out.read()) + 
reg.increment_gpt_ini(machine_changed=True) + except NTSTATUSError as e: + if e.args[0] == NT_STATUS_ACCESS_DENIED: + raise CommandError("The authenticated user does " + "not have sufficient privileges") + raise + elif entry in ([e.data for e in pol_data.entries] if pol_data else []): + entries = [e for e in pol_data.entries if e.data != entry] + pol_data.num_entries = len(entries) + pol_data.entries = entries + + try: + conn.savefile(pol_file, ndr_pack(pol_data)) + reg.increment_gpt_ini(machine_changed=True) + except NTSTATUSError as e: + if e.args[0] == NT_STATUS_ACCESS_DENIED: + raise CommandError("The authenticated user does " + "not have sufficient privileges") + raise + else: + raise CommandError("Cannot remove '%s' because it does not exist" % + entry) + +class cmd_sudoers(SuperCommand): + """Manage Sudoers Group Policy Objects""" + subcommands = {} + subcommands["add"] = cmd_add_sudoers() + subcommands["list"] = cmd_list_sudoers() + subcommands["remove"] = cmd_remove_sudoers() + +class cmd_set_security(GPOCommand): + """Set Samba Security Group Policy to the sysvol + +This command sets a security setting to the sysvol for applying to winbind +clients. Not providing a value will unset the policy. +These settings only apply to the ADDC. + +Example: +samba-tool gpo manage security set {31B2F340-016D-11D2-945F-00C04FB984F9} MaxTicketAge 10 + +Possible policies: +MaxTicketAge Maximum lifetime for user ticket + Defined in hours + +MaxServiceAge Maximum lifetime for service ticket + Defined in minutes + +MaxRenewAge Maximum lifetime for user ticket renewal + Defined in minutes + +MinimumPasswordAge Minimum password age + Defined in days + +MaximumPasswordAge Maximum password age + Defined in days + +MinimumPasswordLength Minimum password length + Defined in characters + +PasswordComplexity Password must meet complexity requirements + 1 is Enabled, 0 is Disabled + """ + + synopsis = "%prog [options]" + + takes_optiongroups = { + "sambaopts": options.SambaOptions, + "versionopts": options.VersionOptions, + "credopts": options.CredentialsOptions, + } + + takes_options = [ + Option("-H", "--URL", help="LDB URL for database or target server", type=str, + metavar="URL", dest="H"), + ] + + takes_args = ["gpo", "policy", "value?"] + + def run(self, gpo, policy, value=None, H=None, sambaopts=None, + credopts=None, versionopts=None): + self.lp = sambaopts.get_loadparm() + self.creds = credopts.get_credentials(self.lp, fallback_machine=True) + + # We need to know writable DC to setup SMB connection + if H and H.startswith('ldap://'): + dc_hostname = H[7:] + self.url = H + else: + dc_hostname = netcmd_finddc(self.lp, self.creds) + self.url = dc_url(self.lp, self.creds, dc=dc_hostname) + + # SMB connect to DC + conn = smb_connection(dc_hostname, + 'sysvol', + lp=self.lp, + creds=self.creds) + + self.samdb_connect() + reg = RegistryGroupPolicies(gpo, self.lp, self.creds, self.samdb, H) + + realm = self.lp.get('realm') + inf_dir = '\\'.join([realm.lower(), 'Policies', gpo, + 'MACHINE\\Microsoft\\Windows NT\\SecEdit']) + inf_file = '\\'.join([inf_dir, 'GptTmpl.inf']) + try: + inf_data = ConfigParser(interpolation=None) + inf_data.optionxform=str + raw = conn.loadfile(inf_file) + try: + inf_data.read_file(StringIO(raw.decode())) + except UnicodeDecodeError: + inf_data.read_file(StringIO(raw.decode('utf-16'))) + except NTSTATUSError as e: + if e.args[0] == NT_STATUS_ACCESS_DENIED: + raise CommandError("The authenticated user does " + "not have sufficient privileges") + if e.args[0] not in 
[NT_STATUS_OBJECT_NAME_INVALID, + NT_STATUS_OBJECT_NAME_NOT_FOUND, + NT_STATUS_OBJECT_PATH_NOT_FOUND]: + raise + + section_map = { 'MaxTicketAge' : 'Kerberos Policy', + 'MaxServiceAge' : 'Kerberos Policy', + 'MaxRenewAge' : 'Kerberos Policy', + 'MinimumPasswordAge' : 'System Access', + 'MaximumPasswordAge' : 'System Access', + 'MinimumPasswordLength' : 'System Access', + 'PasswordComplexity' : 'System Access' + } + + section = section_map[policy] + if not inf_data.has_section(section): + inf_data.add_section(section) + if value is not None: + inf_data.set(section, policy, value) + else: + inf_data.remove_option(section, policy) + if len(inf_data.options(section)) == 0: + inf_data.remove_section(section) + + out = StringIO() + inf_data.write(out) + try: + create_directory_hier(conn, inf_dir) + conn.savefile(inf_file, get_bytes(out.getvalue())) + reg.increment_gpt_ini(machine_changed=True) + except NTSTATUSError as e: + if e.args[0] == NT_STATUS_ACCESS_DENIED: + raise CommandError("The authenticated user does " + "not have sufficient privileges") + else: + raise + +class cmd_list_security(Command): + """List Samba Security Group Policy from the sysvol + +This command lists security settings from the sysvol that will be applied to winbind clients. +These settings only apply to the ADDC. + +Example: +samba-tool gpo manage security list {31B2F340-016D-11D2-945F-00C04FB984F9} + """ + + synopsis = "%prog [options]" + + takes_optiongroups = { + "sambaopts": options.SambaOptions, + "versionopts": options.VersionOptions, + "credopts": options.CredentialsOptions, + } + + takes_options = [ + Option("-H", "--URL", help="LDB URL for database or target server", type=str, + metavar="URL", dest="H"), + ] + + takes_args = ["gpo"] + + def run(self, gpo, H=None, sambaopts=None, credopts=None, versionopts=None): + self.lp = sambaopts.get_loadparm() + self.creds = credopts.get_credentials(self.lp, fallback_machine=True) + + # We need to know writable DC to setup SMB connection + if H and H.startswith('ldap://'): + dc_hostname = H[7:] + self.url = H + else: + dc_hostname = netcmd_finddc(self.lp, self.creds) + self.url = dc_url(self.lp, self.creds, dc=dc_hostname) + + # SMB connect to DC + conn = smb_connection(dc_hostname, + 'sysvol', + lp=self.lp, + creds=self.creds) + + realm = self.lp.get('realm') + inf_file = '\\'.join([realm.lower(), 'Policies', gpo, + 'MACHINE\\Microsoft\\Windows NT\\SecEdit\\GptTmpl.inf']) + try: + inf_data = ConfigParser(interpolation=None) + inf_data.optionxform=str + raw = conn.loadfile(inf_file) + try: + inf_data.read_file(StringIO(raw.decode())) + except UnicodeDecodeError: + inf_data.read_file(StringIO(raw.decode('utf-16'))) + except NTSTATUSError as e: + if e.args[0] in [NT_STATUS_OBJECT_NAME_INVALID, + NT_STATUS_OBJECT_NAME_NOT_FOUND, + NT_STATUS_OBJECT_PATH_NOT_FOUND]: + return # The file doesn't exist, so there is nothing to list + if e.args[0] == NT_STATUS_ACCESS_DENIED: + raise CommandError("The authenticated user does " + "not have sufficient privileges") + raise + + for section in inf_data.sections(): + if section not in ['Kerberos Policy', 'System Access']: + continue + for key, value in inf_data.items(section): + self.outf.write('%s = %s\n' % (key, value)) + +class cmd_security(SuperCommand): + """Manage Security Group Policy Objects""" + subcommands = {} + subcommands["set"] = cmd_set_security() + subcommands["list"] = cmd_list_security() + +class cmd_list_smb_conf(Command): + """List Samba smb.conf Group Policy from the sysvol + +This command lists smb.conf settings from 
the sysvol that will be applied to winbind clients. + +Example: +samba-tool gpo manage smb_conf list {31B2F340-016D-11D2-945F-00C04FB984F9} + """ + + synopsis = "%prog [options]" + + takes_optiongroups = { + "sambaopts": options.SambaOptions, + "versionopts": options.VersionOptions, + "credopts": options.CredentialsOptions, + } + + takes_options = [ + Option("-H", "--URL", help="LDB URL for database or target server", type=str, + metavar="URL", dest="H"), + ] + + takes_args = ["gpo"] + + def run(self, gpo, H=None, sambaopts=None, credopts=None, versionopts=None): + self.lp = sambaopts.get_loadparm() + self.creds = credopts.get_credentials(self.lp, fallback_machine=True) + + # We need to know writable DC to setup SMB connection + if H and H.startswith('ldap://'): + dc_hostname = H[7:] + self.url = H + else: + dc_hostname = netcmd_finddc(self.lp, self.creds) + self.url = dc_url(self.lp, self.creds, dc=dc_hostname) + + # SMB connect to DC + conn = smb_connection(dc_hostname, + 'sysvol', + lp=self.lp, + creds=self.creds) + + realm = self.lp.get('realm') + pol_file = '\\'.join([realm.lower(), 'Policies', gpo, + 'MACHINE\\Registry.pol']) + try: + pol_data = ndr_unpack(preg.file, conn.loadfile(pol_file)) + except NTSTATUSError as e: + if e.args[0] in [NT_STATUS_OBJECT_NAME_INVALID, + NT_STATUS_OBJECT_NAME_NOT_FOUND, + NT_STATUS_OBJECT_PATH_NOT_FOUND]: + return # The file doesn't exist, so there is nothing to list + if e.args[0] == NT_STATUS_ACCESS_DENIED: + raise CommandError("The authenticated user does " + "not have sufficient privileges") + raise + + keyname = b'Software\\Policies\\Samba\\smb_conf' + lp = param.LoadParm() + for entry in pol_data.entries: + if get_bytes(entry.keyname) == keyname: + lp.set(entry.valuename, str(entry.data)) + val = lp.get(entry.valuename) + self.outf.write('%s = %s\n' % (entry.valuename, val)) + +class cmd_set_smb_conf(GPOCommand): + """Sets a Samba smb.conf Group Policy to the sysvol + +This command sets an smb.conf setting to the sysvol for applying to winbind +clients. Not providing a value will unset the policy. 
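+Boolean-like values (yes/true/1 or no/false/0) and plain integers are stored
+as REG_DWORD entries; any other value is stored as a REG_SZ string.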
+ +Example: +samba-tool gpo manage smb_conf set {31B2F340-016D-11D2-945F-00C04FB984F9} 'apply gpo policies' yes + """ + + synopsis = "%prog [options]" + + takes_optiongroups = { + "sambaopts": options.SambaOptions, + "versionopts": options.VersionOptions, + "credopts": options.CredentialsOptions, + } + + takes_options = [ + Option("-H", "--URL", help="LDB URL for database or target server", type=str, + metavar="URL", dest="H"), + ] + + takes_args = ["gpo", "setting", "value?"] + + def run(self, gpo, setting, value=None, H=None, sambaopts=None, credopts=None, + versionopts=None): + self.lp = sambaopts.get_loadparm() + self.creds = credopts.get_credentials(self.lp, fallback_machine=True) + + # We need to know writable DC to setup SMB connection + if H and H.startswith('ldap://'): + dc_hostname = H[7:] + self.url = H + else: + dc_hostname = netcmd_finddc(self.lp, self.creds) + self.url = dc_url(self.lp, self.creds, dc=dc_hostname) + + # SMB connect to DC + conn = smb_connection(dc_hostname, + 'sysvol', + lp=self.lp, + creds=self.creds) + + self.samdb_connect() + reg = RegistryGroupPolicies(gpo, self.lp, self.creds, self.samdb, H) + + realm = self.lp.get('realm') + pol_dir = '\\'.join([realm.lower(), 'Policies', gpo, 'MACHINE']) + pol_file = '\\'.join([pol_dir, 'Registry.pol']) + try: + pol_data = ndr_unpack(preg.file, conn.loadfile(pol_file)) + except NTSTATUSError as e: + if e.args[0] in [NT_STATUS_OBJECT_NAME_INVALID, + NT_STATUS_OBJECT_NAME_NOT_FOUND, + NT_STATUS_OBJECT_PATH_NOT_FOUND]: + pol_data = preg.file() # The file doesn't exist + elif e.args[0] == NT_STATUS_ACCESS_DENIED: + raise CommandError("The authenticated user does " + "not have sufficient privileges") + else: + raise + + if value is None: + if setting not in [e.valuename for e in pol_data.entries]: + raise CommandError("Cannot remove '%s' because it does " + "not exist" % setting) + entries = [e for e in pol_data.entries \ + if e.valuename != setting] + pol_data.entries = entries + pol_data.num_entries = len(entries) + else: + if get_string(value).lower() in ['yes', 'true', '1']: + etype = 4 + val = 1 + elif get_string(value).lower() in ['no', 'false', '0']: + etype = 4 + val = 0 + elif get_string(value).isnumeric(): + etype = 4 + val = int(get_string(value)) + else: + etype = 1 + val = get_bytes(value) + e = preg.entry() + e.keyname = b'Software\\Policies\\Samba\\smb_conf' + e.valuename = get_bytes(setting) + e.type = etype + e.data = val + entries = list(pol_data.entries) + entries.append(e) + pol_data.entries = entries + pol_data.num_entries = len(entries) + + try: + create_directory_hier(conn, pol_dir) + conn.savefile(pol_file, ndr_pack(pol_data)) + reg.increment_gpt_ini(machine_changed=True) + except NTSTATUSError as e: + if e.args[0] == NT_STATUS_ACCESS_DENIED: + raise CommandError("The authenticated user does " + "not have sufficient privileges") + raise + +class cmd_smb_conf(SuperCommand): + """Manage smb.conf Group Policy Objects""" + subcommands = {} + subcommands["list"] = cmd_list_smb_conf() + subcommands["set"] = cmd_set_smb_conf() + +class cmd_list_symlink(Command): + """List VGP Symbolic Link Group Policy from the sysvol + +This command lists symlink settings from the sysvol that will be applied to winbind clients. 
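+Each entry is printed as the equivalent 'ln -s <source> <target>' command.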
+
+Example:
+samba-tool gpo manage symlink list {31B2F340-016D-11D2-945F-00C04FB984F9}
+    """
+
+    synopsis = "%prog [options]"
+
+    takes_optiongroups = {
+        "sambaopts": options.SambaOptions,
+        "versionopts": options.VersionOptions,
+        "credopts": options.CredentialsOptions,
+    }
+
+    takes_options = [
+        Option("-H", "--URL", help="LDB URL for database or target server", type=str,
+               metavar="URL", dest="H"),
+    ]
+
+    takes_args = ["gpo"]
+
+    def run(self, gpo, H=None, sambaopts=None, credopts=None, versionopts=None):
+        self.lp = sambaopts.get_loadparm()
+        self.creds = credopts.get_credentials(self.lp, fallback_machine=True)
+
+        # We need to know writable DC to setup SMB connection
+        if H and H.startswith('ldap://'):
+            dc_hostname = H[7:]
+            self.url = H
+        else:
+            dc_hostname = netcmd_finddc(self.lp, self.creds)
+            self.url = dc_url(self.lp, self.creds, dc=dc_hostname)
+
+        # SMB connect to DC
+        conn = smb_connection(dc_hostname,
+                              'sysvol',
+                              lp=self.lp,
+                              creds=self.creds)
+
+        realm = self.lp.get('realm')
+        vgp_xml = '\\'.join([realm.lower(), 'Policies', gpo,
+                             'MACHINE\\VGP\\VTLA\\Unix',
+                             'Symlink\\manifest.xml'])
+        try:
+            xml_data = ET.fromstring(conn.loadfile(vgp_xml))
+        except NTSTATUSError as e:
+            if e.args[0] in [NT_STATUS_OBJECT_NAME_INVALID,
+                             NT_STATUS_OBJECT_NAME_NOT_FOUND,
+                             NT_STATUS_OBJECT_PATH_NOT_FOUND]:
+                return  # The file doesn't exist, so there is nothing to list
+            if e.args[0] == NT_STATUS_ACCESS_DENIED:
+                raise CommandError("The authenticated user does "
+                                   "not have sufficient privileges")
+            raise
+
+        policy = xml_data.find('policysetting')
+        data = policy.find('data')
+        for file_properties in data.findall('file_properties'):
+            source = file_properties.find('source')
+            target = file_properties.find('target')
+            self.outf.write('ln -s %s %s\n' % (source.text, target.text))
+
+class cmd_add_symlink(GPOCommand):
+    """Adds a VGP Symbolic Link Group Policy to the sysvol
+
+This command adds a symlink setting to the sysvol that will be applied to winbind clients.
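+On the client, the policy produces the equivalent of 'ln -s <source> <target>'.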
+
+Example:
+samba-tool gpo manage symlink add {31B2F340-016D-11D2-945F-00C04FB984F9} /tmp/source /tmp/target
+    """
+
+    synopsis = "%prog [options]"
+
+    takes_optiongroups = {
+        "sambaopts": options.SambaOptions,
+        "versionopts": options.VersionOptions,
+        "credopts": options.CredentialsOptions,
+    }
+
+    takes_options = [
+        Option("-H", "--URL", help="LDB URL for database or target server", type=str,
+               metavar="URL", dest="H"),
+    ]
+
+    takes_args = ["gpo", "source", "target"]
+
+    def run(self, gpo, source, target, H=None, sambaopts=None, credopts=None,
+            versionopts=None):
+        self.lp = sambaopts.get_loadparm()
+        self.creds = credopts.get_credentials(self.lp, fallback_machine=True)
+
+        # We need to know writable DC to setup SMB connection
+        if H and H.startswith('ldap://'):
+            dc_hostname = H[7:]
+            self.url = H
+        else:
+            dc_hostname = netcmd_finddc(self.lp, self.creds)
+            self.url = dc_url(self.lp, self.creds, dc=dc_hostname)
+
+        # SMB connect to DC
+        conn = smb_connection(dc_hostname,
+                              'sysvol',
+                              lp=self.lp,
+                              creds=self.creds)
+
+        self.samdb_connect()
+        reg = RegistryGroupPolicies(gpo, self.lp, self.creds, self.samdb, H)
+
+        realm = self.lp.get('realm')
+        vgp_dir = '\\'.join([realm.lower(), 'Policies', gpo,
+                             'MACHINE\\VGP\\VTLA\\Unix\\Symlink'])
+        vgp_xml = '\\'.join([vgp_dir, 'manifest.xml'])
+        try:
+            xml_data = ET.ElementTree(ET.fromstring(conn.loadfile(vgp_xml)))
+            policy = xml_data.getroot().find('policysetting')
+            data = policy.find('data')
+        except NTSTATUSError as e:
+            if e.args[0] in [NT_STATUS_OBJECT_NAME_INVALID,
+                             NT_STATUS_OBJECT_NAME_NOT_FOUND,
+                             NT_STATUS_OBJECT_PATH_NOT_FOUND]:
+                # The file doesn't exist, so create the xml structure
+                xml_data = ET.ElementTree(ET.Element('vgppolicy'))
+                policysetting = ET.SubElement(xml_data.getroot(),
+                                              'policysetting')
+                pv = ET.SubElement(policysetting, 'version')
+                pv.text = '1'
+                name = ET.SubElement(policysetting, 'name')
+                name.text = 'Symlink Policy'
+                description = ET.SubElement(policysetting, 'description')
+                description.text = 'Specifies symbolic link data'
+                data = ET.SubElement(policysetting, 'data')
+            elif e.args[0] == NT_STATUS_ACCESS_DENIED:
+                raise CommandError("The authenticated user does "
+                                   "not have sufficient privileges")
+            else:
+                raise
+
+        file_properties = ET.SubElement(data, 'file_properties')
+        source_elm = ET.SubElement(file_properties, 'source')
+        source_elm.text = source
+        target_elm = ET.SubElement(file_properties, 'target')
+        target_elm.text = target
+
+        out = BytesIO()
+        xml_data.write(out, encoding='UTF-8', xml_declaration=True)
+        out.seek(0)
+        try:
+            create_directory_hier(conn, vgp_dir)
+            conn.savefile(vgp_xml, out.read())
+            reg.increment_gpt_ini(machine_changed=True)
+        except NTSTATUSError as e:
+            if e.args[0] == NT_STATUS_ACCESS_DENIED:
+                raise CommandError("The authenticated user does "
+                                   "not have sufficient privileges")
+            raise
+
+class cmd_remove_symlink(GPOCommand):
+    """Removes a VGP Symbolic Link Group Policy from the sysvol
+
+This command removes a symlink setting from the sysvol so that it no longer
+applies to winbind clients.
+
+Example:
+samba-tool gpo manage symlink remove {31B2F340-016D-11D2-945F-00C04FB984F9} /tmp/source /tmp/target
+    """
+
+    synopsis = "%prog [options]"
+
+    takes_optiongroups = {
+        "sambaopts": options.SambaOptions,
+        "versionopts": options.VersionOptions,
+        "credopts": options.CredentialsOptions,
+    }
+
+    takes_options = [
+        Option("-H", "--URL", help="LDB URL for database or target server", type=str,
+               metavar="URL", dest="H"),
+    ]
+
+    takes_args = ["gpo", "source", "target"]
+
+    def run(self, gpo, source, target, H=None, sambaopts=None, credopts=None,
+            versionopts=None):
+        self.lp = sambaopts.get_loadparm()
+        self.creds = credopts.get_credentials(self.lp, fallback_machine=True)
+
+        # We need to know writable DC to setup SMB connection
+        if H and H.startswith('ldap://'):
+            dc_hostname = H[7:]
+            self.url = H
+        else:
+            dc_hostname = netcmd_finddc(self.lp, self.creds)
+            self.url = dc_url(self.lp, self.creds, dc=dc_hostname)
+
+        # SMB connect to DC
+        conn = smb_connection(dc_hostname,
+                              'sysvol',
+                              lp=self.lp,
+                              creds=self.creds)
+
+        self.samdb_connect()
+        reg = RegistryGroupPolicies(gpo, self.lp, self.creds, self.samdb, H)
+
+        realm = self.lp.get('realm')
+        vgp_dir = '\\'.join([realm.lower(), 'Policies', gpo,
+                             'MACHINE\\VGP\\VTLA\\Unix\\Symlink'])
+        vgp_xml = '\\'.join([vgp_dir, 'manifest.xml'])
+        try:
+            xml_data = ET.ElementTree(ET.fromstring(conn.loadfile(vgp_xml)))
+            policy = xml_data.getroot().find('policysetting')
+            data = policy.find('data')
+        except NTSTATUSError as e:
+            if e.args[0] in [NT_STATUS_OBJECT_NAME_INVALID,
+                             NT_STATUS_OBJECT_NAME_NOT_FOUND,
+                             NT_STATUS_OBJECT_PATH_NOT_FOUND]:
+                raise CommandError("Cannot remove link from '%s' to '%s' "
+                                   "because it does not exist" % (source, target))
+            elif e.args[0] == NT_STATUS_ACCESS_DENIED:
+                raise CommandError("The authenticated user does "
+                                   "not have sufficient privileges")
+            else:
+                raise
+
+        for file_properties in data.findall('file_properties'):
+            source_elm = file_properties.find('source')
+            target_elm = file_properties.find('target')
+            if source_elm.text == source and target_elm.text == target:
+                data.remove(file_properties)
+                break
+        else:
+            raise CommandError("Cannot remove link from '%s' to '%s' "
+                               "because it does not exist" % (source, target))
+
+        out = BytesIO()
+        xml_data.write(out, encoding='UTF-8', xml_declaration=True)
+        out.seek(0)
+        try:
+            create_directory_hier(conn, vgp_dir)
+            conn.savefile(vgp_xml, out.read())
+            reg.increment_gpt_ini(machine_changed=True)
+        except NTSTATUSError as e:
+            if e.args[0] == NT_STATUS_ACCESS_DENIED:
+                raise CommandError("The authenticated user does "
+                                   "not have sufficient privileges")
+            raise
+
+class cmd_symlink(SuperCommand):
+    """Manage symlink Group Policy Objects"""
+    subcommands = {}
+    subcommands["list"] = cmd_list_symlink()
+    subcommands["add"] = cmd_add_symlink()
+    subcommands["remove"] = cmd_remove_symlink()
+
+class cmd_list_files(Command):
+    """List VGP Files Group Policy from the sysvol
+
+This command lists files which will be copied from the sysvol and applied to winbind clients.
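+Each entry is shown with its permissions, owner, group, and a 'target -> source' mapping.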
+
+Example:
+samba-tool gpo manage files list {31B2F340-016D-11D2-945F-00C04FB984F9}
+    """
+
+    synopsis = "%prog [options]"
+
+    takes_optiongroups = {
+        "sambaopts": options.SambaOptions,
+        "versionopts": options.VersionOptions,
+        "credopts": options.CredentialsOptions,
+    }
+
+    takes_options = [
+        Option("-H", "--URL", help="LDB URL for database or target server", type=str,
+               metavar="URL", dest="H"),
+    ]
+
+    takes_args = ["gpo"]
+
+    def run(self, gpo, H=None, sambaopts=None, credopts=None, versionopts=None):
+        self.lp = sambaopts.get_loadparm()
+        self.creds = credopts.get_credentials(self.lp, fallback_machine=True)
+
+        # We need to know writable DC to setup SMB connection
+        if H and H.startswith('ldap://'):
+            dc_hostname = H[7:]
+            self.url = H
+        else:
+            dc_hostname = netcmd_finddc(self.lp, self.creds)
+            self.url = dc_url(self.lp, self.creds, dc=dc_hostname)
+
+        # SMB connect to DC
+        conn = smb_connection(dc_hostname,
+                              'sysvol',
+                              lp=self.lp,
+                              creds=self.creds)
+
+        realm = self.lp.get('realm')
+        vgp_xml = '\\'.join([realm.lower(), 'Policies', gpo,
+                             'MACHINE\\VGP\\VTLA\\Unix',
+                             'Files\\manifest.xml'])
+        try:
+            xml_data = ET.fromstring(conn.loadfile(vgp_xml))
+        except NTSTATUSError as e:
+            if e.args[0] in [NT_STATUS_OBJECT_NAME_INVALID,
+                             NT_STATUS_OBJECT_NAME_NOT_FOUND,
+                             NT_STATUS_OBJECT_PATH_NOT_FOUND]:
+                return  # The file doesn't exist, so there is nothing to list
+            if e.args[0] == NT_STATUS_ACCESS_DENIED:
+                raise CommandError("The authenticated user does "
+                                   "not have sufficient privileges")
+            raise
+
+        policy = xml_data.find('policysetting')
+        data = policy.find('data')
+        for entry in data.findall('file_properties'):
+            source = entry.find('source').text
+            target = entry.find('target').text
+            user = entry.find('user').text
+            group = entry.find('group').text
+            mode = calc_mode(entry)
+            p = '%s\t%s\t%s\t%s -> %s' % \
+                (stat_from_mode(mode), user, group, target, source)
+            self.outf.write('%s\n' % p)
+
+class cmd_add_files(GPOCommand):
+    """Add VGP Files Group Policy to the sysvol
+
+This command adds files which will be copied from the sysvol and applied to winbind clients.
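+The final three arguments give the ownership and permissions applied on the
+client; the mode is interpreted as an octal mask (e.g. 600).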
+
+Example:
+samba-tool gpo manage files add {31B2F340-016D-11D2-945F-00C04FB984F9} ./source.txt /usr/share/doc/target.txt root root 600
+    """
+
+    synopsis = "%prog [options]"
+
+    takes_optiongroups = {
+        "sambaopts": options.SambaOptions,
+        "versionopts": options.VersionOptions,
+        "credopts": options.CredentialsOptions,
+    }
+
+    takes_options = [
+        Option("-H", "--URL", help="LDB URL for database or target server", type=str,
+               metavar="URL", dest="H"),
+    ]
+
+    takes_args = ["gpo", "source", "target", "user", "group", "mode"]
+
+    def run(self, gpo, source, target, user, group, mode, H=None,
+            sambaopts=None, credopts=None, versionopts=None):
+        self.lp = sambaopts.get_loadparm()
+        self.creds = credopts.get_credentials(self.lp, fallback_machine=True)
+
+        if not os.path.exists(source):
+            raise CommandError("Source '%s' does not exist" % source)
+
+        # We need to know writable DC to setup SMB connection
+        if H and H.startswith('ldap://'):
+            dc_hostname = H[7:]
+            self.url = H
+        else:
+            dc_hostname = netcmd_finddc(self.lp, self.creds)
+            self.url = dc_url(self.lp, self.creds, dc=dc_hostname)
+
+        # SMB connect to DC
+        conn = smb_connection(dc_hostname,
+                              'sysvol',
+                              lp=self.lp,
+                              creds=self.creds)
+
+        self.samdb_connect()
+        reg = RegistryGroupPolicies(gpo, self.lp, self.creds, self.samdb, H)
+
+        realm = self.lp.get('realm')
+        vgp_dir = '\\'.join([realm.lower(), 'Policies', gpo,
+                             'MACHINE\\VGP\\VTLA\\Unix\\Files'])
+        vgp_xml = '\\'.join([vgp_dir, 'manifest.xml'])
+        try:
+            xml_data = ET.ElementTree(ET.fromstring(conn.loadfile(vgp_xml)))
+            policy = xml_data.getroot().find('policysetting')
+            data = policy.find('data')
+        except NTSTATUSError as e:
+            if e.args[0] in [NT_STATUS_OBJECT_NAME_INVALID,
+                             NT_STATUS_OBJECT_NAME_NOT_FOUND,
+                             NT_STATUS_OBJECT_PATH_NOT_FOUND]:
+                # The file doesn't exist, so create the xml structure
+                xml_data = ET.ElementTree(ET.Element('vgppolicy'))
+                policysetting = ET.SubElement(xml_data.getroot(),
+                                              'policysetting')
+                pv = ET.SubElement(policysetting, 'version')
+                pv.text = '1'
+                name = ET.SubElement(policysetting, 'name')
+                name.text = 'Files'
+                description = ET.SubElement(policysetting, 'description')
+                description.text = 'Represents file data to set/copy on clients'
+                data = ET.SubElement(policysetting, 'data')
+            elif e.args[0] == NT_STATUS_ACCESS_DENIED:
+                raise CommandError("The authenticated user does "
+                                   "not have sufficient privileges")
+            else:
+                raise
+
+        file_properties = ET.SubElement(data, 'file_properties')
+        source_elm = ET.SubElement(file_properties, 'source')
+        source_elm.text = os.path.basename(source)
+        target_elm = ET.SubElement(file_properties, 'target')
+        target_elm.text = target
+        user_elm = ET.SubElement(file_properties, 'user')
+        user_elm.text = user
+        group_elm = ET.SubElement(file_properties, 'group')
+        group_elm.text = group
+        # Translate the octal mode into read/write/execute elements for the
+        # user, group and other permission sets
+        for ptype, shift in [('user', 6), ('group', 3), ('other', 0)]:
+            permissions = ET.SubElement(file_properties, 'permissions')
+            permissions.set('type', ptype)
+            if int(mode, 8) & (0o4 << shift):
+                ET.SubElement(permissions, 'read')
+            if int(mode, 8) & (0o2 << shift):
+                ET.SubElement(permissions, 'write')
+            if int(mode, 8) & (0o1 << shift):
+                ET.SubElement(permissions, 'execute')
+
+        out = BytesIO()
+        xml_data.write(out, encoding='UTF-8', xml_declaration=True)
+        out.seek(0)
+        with open(source, 'rb') as source_file:
+            source_data = source_file.read()
+        sysvol_source = '\\'.join([vgp_dir, os.path.basename(source)])
+        try:
+            create_directory_hier(conn, vgp_dir)
+            conn.savefile(vgp_xml, out.read())
+            conn.savefile(sysvol_source, source_data)
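+            # Bump the GPO version in GPT.INI so clients detect the change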
+            reg.increment_gpt_ini(machine_changed=True)
+        except NTSTATUSError as e:
+            if e.args[0] == NT_STATUS_ACCESS_DENIED:
+                raise CommandError("The authenticated user does "
+                                   "not have sufficient privileges")
+            raise
+
+class cmd_remove_files(GPOCommand):
+    """Remove VGP Files Group Policy from the sysvol
+
+This command removes files which would be copied from the sysvol and applied to winbind clients.
+
+Example:
+samba-tool gpo manage files remove {31B2F340-016D-11D2-945F-00C04FB984F9} /usr/share/doc/target.txt
+    """
+
+    synopsis = "%prog [options]"
+
+    takes_optiongroups = {
+        "sambaopts": options.SambaOptions,
+        "versionopts": options.VersionOptions,
+        "credopts": options.CredentialsOptions,
+    }
+
+    takes_options = [
+        Option("-H", "--URL", help="LDB URL for database or target server", type=str,
+               metavar="URL", dest="H"),
+    ]
+
+    takes_args = ["gpo", "target"]
+
+    def run(self, gpo, target, H=None, sambaopts=None, credopts=None,
+            versionopts=None):
+        self.lp = sambaopts.get_loadparm()
+        self.creds = credopts.get_credentials(self.lp, fallback_machine=True)
+
+        # We need to know writable DC to setup SMB connection
+        if H and H.startswith('ldap://'):
+            dc_hostname = H[7:]
+            self.url = H
+        else:
+            dc_hostname = netcmd_finddc(self.lp, self.creds)
+            self.url = dc_url(self.lp, self.creds, dc=dc_hostname)
+
+        # SMB connect to DC
+        conn = smb_connection(dc_hostname,
+                              'sysvol',
+                              lp=self.lp,
+                              creds=self.creds)
+
+        self.samdb_connect()
+        reg = RegistryGroupPolicies(gpo, self.lp, self.creds, self.samdb, H)
+
+        realm = self.lp.get('realm')
+        vgp_dir = '\\'.join([realm.lower(), 'Policies', gpo,
+                             'MACHINE\\VGP\\VTLA\\Unix\\Files'])
+        vgp_xml = '\\'.join([vgp_dir, 'manifest.xml'])
+        try:
+            xml_data = ET.ElementTree(ET.fromstring(conn.loadfile(vgp_xml)))
+            policy = xml_data.getroot().find('policysetting')
+            data = policy.find('data')
+        except NTSTATUSError as e:
+            if e.args[0] in [NT_STATUS_OBJECT_NAME_INVALID,
+                             NT_STATUS_OBJECT_NAME_NOT_FOUND,
+                             NT_STATUS_OBJECT_PATH_NOT_FOUND]:
+                raise CommandError("Cannot remove file '%s' "
+                                   "because it does not exist" % target)
+            elif e.args[0] == NT_STATUS_ACCESS_DENIED:
+                raise CommandError("The authenticated user does "
+                                   "not have sufficient privileges")
+            else:
+                raise
+
+        for file_properties in data.findall('file_properties'):
+            source_elm = file_properties.find('source')
+            target_elm = file_properties.find('target')
+            if target_elm.text == target:
+                source = '\\'.join([vgp_dir, source_elm.text])
+                conn.unlink(source)
+                data.remove(file_properties)
+                break
+        else:
+            raise CommandError("Cannot remove file '%s' "
+                               "because it does not exist" % target)
+
+        out = BytesIO()
+        xml_data.write(out, encoding='UTF-8', xml_declaration=True)
+        out.seek(0)
+        try:
+            create_directory_hier(conn, vgp_dir)
+            conn.savefile(vgp_xml, out.read())
+            reg.increment_gpt_ini(machine_changed=True)
+        except NTSTATUSError as e:
+            if e.args[0] == NT_STATUS_ACCESS_DENIED:
+                raise CommandError("The authenticated user does "
+                                   "not have sufficient privileges")
+            raise
+
+class cmd_files(SuperCommand):
+    """Manage Files Group Policy Objects"""
+    subcommands = {}
+    subcommands["list"] = cmd_list_files()
+    subcommands["add"] = cmd_add_files()
+    subcommands["remove"] = cmd_remove_files()
+
+class cmd_list_openssh(Command):
+    """List VGP OpenSSH Group Policy from the sysvol
+
+This command lists openssh options from the sysvol that will be applied to winbind clients.
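+The output uses sshd_config syntax, one 'key value' pair per line.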
+
+Example:
+samba-tool gpo manage openssh list {31B2F340-016D-11D2-945F-00C04FB984F9}
+    """
+
+    synopsis = "%prog [options]"
+
+    takes_optiongroups = {
+        "sambaopts": options.SambaOptions,
+        "versionopts": options.VersionOptions,
+        "credopts": options.CredentialsOptions,
+    }
+
+    takes_options = [
+        Option("-H", "--URL", help="LDB URL for database or target server", type=str,
+               metavar="URL", dest="H"),
+    ]
+
+    takes_args = ["gpo"]
+
+    def run(self, gpo, H=None, sambaopts=None, credopts=None, versionopts=None):
+        self.lp = sambaopts.get_loadparm()
+        self.creds = credopts.get_credentials(self.lp, fallback_machine=True)
+
+        # We need to know writable DC to setup SMB connection
+        if H and H.startswith('ldap://'):
+            dc_hostname = H[7:]
+            self.url = H
+        else:
+            dc_hostname = netcmd_finddc(self.lp, self.creds)
+            self.url = dc_url(self.lp, self.creds, dc=dc_hostname)
+
+        # SMB connect to DC
+        conn = smb_connection(dc_hostname,
+                              'sysvol',
+                              lp=self.lp,
+                              creds=self.creds)
+
+        realm = self.lp.get('realm')
+        vgp_xml = '\\'.join([realm.lower(), 'Policies', gpo,
+                             'MACHINE\\VGP\\VTLA\\SshCfg',
+                             'SshD\\manifest.xml'])
+        try:
+            xml_data = ET.fromstring(conn.loadfile(vgp_xml))
+        except NTSTATUSError as e:
+            if e.args[0] in [NT_STATUS_OBJECT_NAME_INVALID,
+                             NT_STATUS_OBJECT_NAME_NOT_FOUND,
+                             NT_STATUS_OBJECT_PATH_NOT_FOUND]:
+                return  # The file doesn't exist, so there is nothing to list
+            if e.args[0] == NT_STATUS_ACCESS_DENIED:
+                raise CommandError("The authenticated user does "
+                                   "not have sufficient privileges")
+            raise
+
+        policy = xml_data.find('policysetting')
+        data = policy.find('data')
+        configfile = data.find('configfile')
+        for configsection in configfile.findall('configsection'):
+            if configsection.find('sectionname').text:
+                continue
+            for kv in configsection.findall('keyvaluepair'):
+                self.outf.write('%s %s\n' % (kv.find('key').text,
+                                             kv.find('value').text))
+
+class cmd_set_openssh(GPOCommand):
+    """Sets a VGP OpenSSH Group Policy to the sysvol
+
+This command sets an openssh setting to the sysvol for applying to winbind
+clients. Not providing a value will unset the policy.
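+
+Example (unsetting a previously set policy):
+samba-tool gpo manage openssh set {31B2F340-016D-11D2-945F-00C04FB984F9} KerberosAuthentication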
+
+Example:
+samba-tool gpo manage openssh set {31B2F340-016D-11D2-945F-00C04FB984F9} KerberosAuthentication Yes
+    """
+
+    synopsis = "%prog [value] [options]"
+
+    takes_optiongroups = {
+        "sambaopts": options.SambaOptions,
+        "versionopts": options.VersionOptions,
+        "credopts": options.CredentialsOptions,
+    }
+
+    takes_options = [
+        Option("-H", "--URL", help="LDB URL for database or target server", type=str,
+               metavar="URL", dest="H"),
+    ]
+
+    takes_args = ["gpo", "setting", "value?"]
+
+    def run(self, gpo, setting, value=None, H=None, sambaopts=None,
+            credopts=None, versionopts=None):
+        self.lp = sambaopts.get_loadparm()
+        self.creds = credopts.get_credentials(self.lp, fallback_machine=True)
+
+        # We need to know writable DC to setup SMB connection
+        if H and H.startswith('ldap://'):
+            dc_hostname = H[7:]
+            self.url = H
+        else:
+            dc_hostname = netcmd_finddc(self.lp, self.creds)
+            self.url = dc_url(self.lp, self.creds, dc=dc_hostname)
+
+        # SMB connect to DC
+        conn = smb_connection(dc_hostname,
+                              'sysvol',
+                              lp=self.lp,
+                              creds=self.creds)
+
+        self.samdb_connect()
+        reg = RegistryGroupPolicies(gpo, self.lp, self.creds, self.samdb, H)
+
+        realm = self.lp.get('realm')
+        vgp_dir = '\\'.join([realm.lower(), 'Policies', gpo,
+                             'MACHINE\\VGP\\VTLA\\SshCfg\\SshD'])
+        vgp_xml = '\\'.join([vgp_dir, 'manifest.xml'])
+        try:
+            xml_data = ET.ElementTree(ET.fromstring(conn.loadfile(vgp_xml)))
+            policy = xml_data.getroot().find('policysetting')
+            data = policy.find('data')
+            configfile = data.find('configfile')
+        except NTSTATUSError as e:
+            if e.args[0] in [NT_STATUS_OBJECT_NAME_INVALID,
+                             NT_STATUS_OBJECT_NAME_NOT_FOUND,
+                             NT_STATUS_OBJECT_PATH_NOT_FOUND]:
+                # The file doesn't exist, so create the xml structure
+                xml_data = ET.ElementTree(ET.Element('vgppolicy'))
+                policysetting = ET.SubElement(xml_data.getroot(),
+                                              'policysetting')
+                pv = ET.SubElement(policysetting, 'version')
+                pv.text = '1'
+                name = ET.SubElement(policysetting, 'name')
+                name.text = 'Configuration File'
+                description = ET.SubElement(policysetting, 'description')
+                description.text = 'Represents Unix configuration file settings'
+                apply_mode = ET.SubElement(policysetting, 'apply_mode')
+                apply_mode.text = 'merge'
+                data = ET.SubElement(policysetting, 'data')
+                configfile = ET.SubElement(data, 'configfile')
+                configsection = ET.SubElement(configfile, 'configsection')
+                ET.SubElement(configsection, 'sectionname')
+            elif e.args[0] == NT_STATUS_ACCESS_DENIED:
+                raise CommandError("The authenticated user does "
+                                   "not have sufficient privileges")
+            else:
+                raise
+
+        if value is not None:
+            for configsection in configfile.findall('configsection'):
+                if configsection.find('sectionname').text:
+                    continue  # Ignore Quest SSH settings
+                settings = {}
+                for kv in configsection.findall('keyvaluepair'):
+                    settings[kv.find('key').text] = kv
+                if setting in settings.keys():
+                    # Update the existing entry in place
+                    settings[setting].find('value').text = value
+                else:
+                    keyvaluepair = ET.SubElement(configsection, 'keyvaluepair')
+                    key = ET.SubElement(keyvaluepair, 'key')
+                    key.text = setting
+                    dvalue = ET.SubElement(keyvaluepair, 'value')
+                    dvalue.text = value
+        else:
+            for configsection in configfile.findall('configsection'):
+                if configsection.find('sectionname').text:
+                    continue  # Ignore Quest SSH settings
+                settings = {}
+                for kv in configsection.findall('keyvaluepair'):
+                    settings[kv.find('key').text] = kv
+                if setting in settings.keys():
+                    configsection.remove(settings[setting])
+                else:
+                    raise CommandError("Cannot remove '%s' because it does "
+                                       "not exist" % setting)
+
+        out = BytesIO()
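+        # Serialize the updated manifest and write it back to the sysvol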
+        xml_data.write(out, encoding='UTF-8', xml_declaration=True)
+        out.seek(0)
+        try:
+            create_directory_hier(conn, vgp_dir)
+            conn.savefile(vgp_xml, out.read())
+            reg.increment_gpt_ini(machine_changed=True)
+        except NTSTATUSError as e:
+            if e.args[0] == NT_STATUS_ACCESS_DENIED:
+                raise CommandError("The authenticated user does "
+                                   "not have sufficient privileges")
+            raise
+
+class cmd_openssh(SuperCommand):
+    """Manage OpenSSH Group Policy Objects"""
+    subcommands = {}
+    subcommands["list"] = cmd_list_openssh()
+    subcommands["set"] = cmd_set_openssh()
+
+class cmd_list_startup(Command):
+    """List VGP Startup Script Group Policy from the sysvol
+
+This command lists the startup script policies currently set on the sysvol.
+
+Example:
+samba-tool gpo manage scripts startup list {31B2F340-016D-11D2-945F-00C04FB984F9}
+    """
+
+    synopsis = "%prog [options]"
+
+    takes_optiongroups = {
+        "sambaopts": options.SambaOptions,
+        "versionopts": options.VersionOptions,
+        "credopts": options.CredentialsOptions,
+    }
+
+    takes_options = [
+        Option("-H", "--URL", help="LDB URL for database or target server", type=str,
+               metavar="URL", dest="H"),
+    ]
+
+    takes_args = ["gpo"]
+
+    def run(self, gpo, H=None, sambaopts=None, credopts=None, versionopts=None):
+        self.lp = sambaopts.get_loadparm()
+        self.creds = credopts.get_credentials(self.lp, fallback_machine=True)
+
+        # We need to know writable DC to setup SMB connection
+        if H and H.startswith('ldap://'):
+            dc_hostname = H[7:]
+            self.url = H
+        else:
+            dc_hostname = netcmd_finddc(self.lp, self.creds)
+            self.url = dc_url(self.lp, self.creds, dc=dc_hostname)
+
+        # SMB connect to DC
+        conn = smb_connection(dc_hostname,
+                              'sysvol',
+                              lp=self.lp,
+                              creds=self.creds)
+
+        realm = self.lp.get('realm')
+        vgp_xml = '\\'.join([realm.lower(), 'Policies', gpo,
+                             'MACHINE\\VGP\\VTLA\\Unix',
+                             'Scripts\\Startup\\manifest.xml'])
+        try:
+            xml_data = ET.fromstring(conn.loadfile(vgp_xml))
+        except NTSTATUSError as e:
+            if e.args[0] in [NT_STATUS_OBJECT_NAME_INVALID,
+                             NT_STATUS_OBJECT_NAME_NOT_FOUND,
+                             NT_STATUS_OBJECT_PATH_NOT_FOUND]:
+                return  # The file doesn't exist, so there is nothing to list
+            if e.args[0] == NT_STATUS_ACCESS_DENIED:
+                raise CommandError("The authenticated user does "
+                                   "not have sufficient privileges")
+            raise
+
+        policy = xml_data.find('policysetting')
+        data = policy.find('data')
+        for listelement in data.findall('listelement'):
+            script = listelement.find('script')
+            script_path = '\\'.join(['\\', realm.lower(), 'Policies', gpo,
+                                     'MACHINE\\VGP\\VTLA\\Unix\\Scripts',
+                                     'Startup', script.text])
+            parameters = listelement.find('parameters')
+            run_as = listelement.find('run_as')
+            if run_as is not None:
+                run_as = run_as.text
+            else:
+                run_as = 'root'
+            if parameters is not None:
+                parameters = parameters.text
+            else:
+                parameters = ''
+            self.outf.write('@reboot %s %s %s\n' % (run_as, script_path,
+                                                    parameters))
+
+class cmd_add_startup(GPOCommand):
+    """Adds VGP Startup Script Group Policy to the sysvol
+
+This command adds a startup script policy to the sysvol.
+
+Example:
+samba-tool gpo manage scripts startup add {31B2F340-016D-11D2-945F-00C04FB984F9} test_script.sh '\\-n \\-p all'
+    """
+
+    synopsis = "%prog