Diffstat (limited to 'ansible_collections/cisco/intersight/playbooks')
58 files changed, 3332 insertions, 0 deletions
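Note: the playbooks below share one credential pattern: an api_info mapping is defined once under vars with a YAML anchor (&api_info) and merged into each task with the YAML merge key (<<: *api_info), so API key settings live in a single place; when the api_key vars are omitted, the INTERSIGHT_API_KEY_ID, INTERSIGHT_API_PRIVATE_KEY, and INTERSIGHT_API_URI environment variables supply the key data. A minimal standalone sketch of the pattern follows; the localhost target and the organization name 'default' are placeholders, not taken from the diff:

---
- hosts: localhost
  connection: local
  gather_facts: false
  vars:
    # Define the shared API settings once; tasks pull them in via the alias
    api_info: &api_info
      api_private_key: "{{ api_private_key | default(omit) }}"
      api_key_id: "{{ api_key_id | default(omit) }}"
      api_uri: "{{ api_uri | default(omit) }}"
      validate_certs: "{{ validate_certs | default(omit) }}"
  tasks:
    - name: Look up an Organization by name
      cisco.intersight.intersight_rest_api:
        <<: *api_info
        resource_path: /organization/Organizations
        query_params:
          $filter: "Name eq 'default'"
      delegate_to: localhost
      register: org_resp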
diff --git a/ansible_collections/cisco/intersight/playbooks/claim_device.yml b/ansible_collections/cisco/intersight/playbooks/claim_device.yml new file mode 100644 index 00000000..06d397c6 --- /dev/null +++ b/ansible_collections/cisco/intersight/playbooks/claim_device.yml @@ -0,0 +1,31 @@ +--- +# +# The hosts group used is provided by the group variable or defaulted to 'Intersight_Servers'. +# You can specify a specific host (or host group) on the command line: +# ansible-playbook ... -e group=<your host group> +# e.g., ansible-playbook server_profiles.yml -e group=TME_Demo +# +- hosts: "{{ group | default('Intersight_Servers') }}" + connection: local + gather_facts: false + vars: + # Create an anchor for api_info that can be used throughout the file + api_info: &api_info + api_private_key: "{{ api_private_key }}" + api_key_id: "{{ api_key_id }}" + api_uri: "{{ api_uri | default(omit) }}" + validate_certs: "{{ validate_certs | default(omit) }}" + state: "{{ state | default(omit) }}" + tasks: + # Claim device + - name: Claim device + cisco.intersight.intersight_rest_api: + <<: *api_info + resource_path: /asset/DeviceClaims + api_body: { + "SecurityToken": "{{ SecurityToken }}", + "SerialNumber": "{{ SerialNumber }}" + } + update_method: post + delegate_to: localhost + run_once: true diff --git a/ansible_collections/cisco/intersight/playbooks/cos_server_policies_and_profiles.yml b/ansible_collections/cisco/intersight/playbooks/cos_server_policies_and_profiles.yml new file mode 100644 index 00000000..9adc3ebd --- /dev/null +++ b/ansible_collections/cisco/intersight/playbooks/cos_server_policies_and_profiles.yml @@ -0,0 +1,353 @@ +--- +# +# Configure Server Profiles and Policies +# +# The hosts group used is provided by the group variable or defaulted to 'Intersight_Servers'. +# You can specify a specific host (or host group) on the command line: +# ansible-playbook ... 
-e group=<your host group> +# e.g., ansible-playbook server_profiles.yml -e group=TME_Demo +# +- hosts: "{{ group | default('Intersight_Servers') }}" + connection: local + collections: + - cisco.intersight + gather_facts: false + vars: + # Create an anchor for api_info that can be used throughout the file + api_info: &api_info + api_private_key: "{{ api_private_key }}" + api_key_id: "{{ api_key_id }}" + api_uri: "{{ api_uri | default(omit) }}" + validate_certs: "{{ validate_certs | default(omit) }}" + state: "{{ state | default(omit) }}" + # Server Profile name default + profile_name: "SP-{{ inventory_hostname }}" + # Organization name + org_name: DevNet + tasks: + # Get the Organization Moid used by all profiles and policies + - name: "Get Organization {{ org_name }} Moid" + intersight_rest_api: + <<: *api_info + resource_path: /organization/Organizations + query_params: + $filter: "Name eq '{{ org_name }}'" + register: org_resp + delegate_to: localhost + tags: always + # + # Configure profiles specific to server (run for each server in the inventory) + # Server Profiles role will register a profile_resp and profile_resp list (from all hosts) can be used by policy tasks + # + - name: "Configure {{ profile_name }} Server Profile" + intersight_rest_api: + <<: *api_info + resource_path: /server/Profiles + query_params: + $filter: "Name eq '{{ profile_name }}'" + api_body: { + "Name": "{{ profile_name }}", + "AssignedServer": { + "Moid": "{{ server_moid }}", + "ObjectType": "compute.RackUnit" + }, + "Organization": { + "Moid": "{{ org_resp.api_response.Moid }}" + } + } + register: profile_resp + when: server_moid is defined + delegate_to: localhost + tags: server_profiles + # + # Enclose policy tasks in a block that runs once + # Policy API body is specified in a role specific vars section for each role import + # See https://intersight.com/apidocs/ or https://intersight.com/mobrowser/ for information on setting resource_path and api_body + # + - block: + # Boot Order policy + - import_role: + name: policies/server_policies + vars: + resource_path: /boot/PrecisionPolicies + api_body: { + "Name": "COS-Boot", + "ConfiguredBootMode": "Legacy", + "BootDevices": [ + { + "ObjectType": "boot.LocalDisk", + "Enabled": true, + "Name": "Disk", + "Slot": "MRAID" + }, + { + "ObjectType": "boot.VirtualMedia", + "Enabled": true, + "Name": "VM", + "Subtype": "cimc-mapped-dvd" + } + ], + "Organization": { + "Moid": "{{ org_resp.api_response.Moid }}" + } + } + tags: boot_order + # Adapter Configuration policy + - import_role: + name: policies/server_policies + vars: + resource_path: /adapter/ConfigPolicies + api_body: { + "Name":"COS-Adapter", + "Settings":[ + { + "SlotId":"MLOM", + "EthSettings":{ + "LldpEnabled":true + }, + "FcSettings":{ + "FipEnabled":false + } + } + ], + "Organization": { + "Moid": "{{ org_resp.api_response.Moid }}" + } + } + tags: adapter_configuration + # LAN Connectivity and related policies + - block: + # Ethernet Adapter + - name: "Configure Ethernet Adapter Policy" + intersight_rest_api: + <<: *api_info + resource_path: /vnic/EthAdapterPolicies + query_params: + $filter: "Name eq 'COS-EthernetAdapter'" + api_body: { + "Name": "COS-EthernetAdapter", + "InterruptSettings": { + "Count": 32, + "Mode": "MSIx", + "CoalescingTime": 125, + "CoalescingType": "MIN" + }, + "RxQueueSettings": { + "Count": 8, + "RingSize": 4096 + }, + "TxQueueSettings": { + "Count": 8, + "RingSize": 4096 + }, + "CompletionQueueSettings": { + "Count": 16 + }, + "Organization": { + "Moid": "{{ 
org_resp.api_response.Moid }}" + } + } + register: eth_adapter_resp + # Ethernet Network + - name: "Configure Ethernet Network Policy" + intersight_rest_api: + <<: *api_info + resource_path: /vnic/EthNetworkPolicies + query_params: + $filter: "Name eq 'COS-EthernetNetwork'" + api_body: { + "Name": "COS-EthernetNetwork", + "VlanSettings": { + "Mode": "TRUNK", + "DefaultVlan": 10 + }, + "Organization": { + "Moid": "{{ org_resp.api_response.Moid }}" + } + } + register: eth_network_resp + # Ethernet QoS + - name: "Configure Ethernet QoS Policy" + intersight_rest_api: + <<: *api_info + resource_path: /vnic/EthQosPolicies + query_params: + $filter: "Name eq 'COS-QoS'" + api_body: { + "Name": "COS-QoS", + "Mtu": 9000, + "Organization": { + "Moid": "{{ org_resp.api_response.Moid }}" + } + } + register: eth_qos_resp + # Import role for LAN Connectivity will register a policy_resp + - import_role: + name: policies/server_policies + vars: + resource_path: /vnic/LanConnectivityPolicies + api_body: { + "Name": "COS-LAN", + "Organization": { + "Moid": "{{ org_resp.api_response.Moid }}" + } + } + # vNIC configuration + # Ideally this would be in a loop, but Uplink is converted to a string (instead of the required int) when in a loop + - name: "Configure eth0" + intersight_rest_api: + <<: *api_info + resource_path: /vnic/EthIfs + query_params: + $filter: "LanConnectivityPolicy.Moid eq '{{ policy_resp.api_response.Moid }}' and Name eq 'eth0'" + api_body: { + "Name": "eth0", + "Placement": { + "Id": "MLOM", + "Uplink": 0 + }, + "Order": 0, + "EthAdapterPolicy": { + "Moid": "{{ eth_adapter_resp.api_response.Moid }}" + }, + "EthNetworkPolicy": { + "Moid": "{{ eth_network_resp.api_response.Moid }}" + }, + "EthQosPolicy": { + "Moid": "{{ eth_qos_resp.api_response.Moid }}" + }, + "LanConnectivityPolicy": { + "Moid": "{{ policy_resp.api_response.Moid }}" + }, + "Organization": { + "Moid": "{{ org_resp.api_response.Moid }}" + } + } + - name: "Configure eth1" + intersight_rest_api: + <<: *api_info + resource_path: /vnic/EthIfs + query_params: + $filter: "LanConnectivityPolicy.Moid eq '{{ policy_resp.api_response.Moid }}' and Name eq 'eth1'" + api_body: { + "Name": "eth1", + "Placement": { + "Id": "MLOM", + "Uplink": 1 + }, + "Order": 1, + "EthAdapterPolicy": { + "Moid": "{{ eth_adapter_resp.api_response.Moid }}" + }, + "EthNetworkPolicy": { + "Moid": "{{ eth_network_resp.api_response.Moid }}" + }, + "EthQosPolicy": { + "Moid": "{{ eth_qos_resp.api_response.Moid }}" + }, + "LanConnectivityPolicy": { + "Moid": "{{ policy_resp.api_response.Moid }}" + }, + "Organization": { + "Moid": "{{ org_resp.api_response.Moid }}" + } + } + tags: lan_connectivity + # NTP policy config + - import_role: + name: policies/server_policies + vars: + resource_path: /ntp/Policies + api_body: { + "Name": "COS-NTP", + "Enabled": true, + "NtpServers": [ + "173.38.201.115" + ], + "Organization": { + "Moid": "{{ org_resp.api_response.Moid }}" + } + } + tags: ntp + # Storage and related policies + - block: + # Disk Group policy + - name: "Configure Disk Group Policy" + intersight_rest_api: + <<: *api_info + resource_path: /storage/DiskGroupPolicies + query_params: + $filter: "Name eq 'COS-Disk'" + api_body: { + "Name":"COS-Disk", + "RaidLevel":"Raid1", + "SpanGroups":[ + { + "Disks":[ + { + "SlotNumber":13 + }, + { + "SlotNumber":14 + } + ] + } + ], + "UseJbods":true, + "Organization": { + "Moid": "{{ org_resp.api_response.Moid }}" + } + } + register: disk_group_resp + # Storage policy + - import_role: + name: policies/server_policies + vars: + 
resource_path: /storage/StoragePolicies + api_body: { + "Name": "COS-Storage", + "RetainPolicyVirtualDrives": true, + "UnusedDisksState": "Jbod", + "VirtualDrives": [ + { + "Name": "Boot", + "DiskGroupPolicy": "{{ disk_group_resp.api_response.Moid }}", + "AccessPolicy": "ReadWrite", + "ReadPolicy": "Default", + "WritePolicy": "WriteBackGoodBbu", + "IoPolicy": "Default", + "DriveCache": "Default", + "ExpandToAvailable": true, + "BootDrive": true + } + ], + "Organization": { + "Moid": "{{ org_resp.api_response.Moid }}" + } + } + tags: storage + # Virtual Media policy config + - import_role: + name: policies/server_policies + vars: + resource_path: /vmedia/Policies + api_body: { + "Name": "COS-VM", + "Mappings": [ + { + "MountProtocol": "http", + "VolumeName": "COS.3.13.6", + "DeviceType": "cdd", + "HostName": "sjc02dmz-rhel.sjc02dmz.net", + "RemotePath": "ibm", + "RemoteFile": "clevos-3.13.6.33-allinone-usbiso.iso" + } + ], + "Organization": { + "Moid": "{{ org_resp.api_response.Moid }}" + } + } + tags: virtual_media + # Policies are common, so only run this block once and not for every host + run_once: true + delegate_to: localhost diff --git a/ansible_collections/cisco/intersight/playbooks/deploy_server_profiles.yml b/ansible_collections/cisco/intersight/playbooks/deploy_server_profiles.yml new file mode 100644 index 00000000..6e9a3892 --- /dev/null +++ b/ansible_collections/cisco/intersight/playbooks/deploy_server_profiles.yml @@ -0,0 +1,38 @@ +--- +# +# Deploy Server Profiles +# +# The hosts group used is provided by the group variable or defaulted to 'Intersight_Servers'. +# You can specify a specific host (or host group) on the command line: +# ansible-playbook ... -e group=<your host group> +# e.g., ansible-playbook server_profiles.yml -e group=TME_Demo +# +- hosts: "{{ group | default('Intersight_Servers') }}" + connection: local + gather_facts: false + vars: + # Create an anchor for api_info that can be used throughout the file + api_info: &api_info + # if api_key vars are omitted, INTERSIGHT_API_KEY_ID, INTERSIGHT_API_PRIVATE_KEY, + # and INTERSIGHT_API_URI environment variables used for API key data + api_private_key: "{{ api_private_key | default(omit) }}" + api_key_id: "{{ api_key_id | default(omit) }}" + api_uri: "{{ api_uri | default(omit) }}" + validate_certs: "{{ validate_certs | default(omit) }}" + state: "{{ state | default(omit) }}" + # Server Profile name default + profile_name: "SP-{{ inventory_hostname }}" + tasks: + # Deploy (or perform other action) + # action can be given on the command line if needed, e.g., ansible-playbook ... -e action=Unassign + # to delete a profile (profile must 1st be unassigned): ansible-playbook ... 
-e state=absent -e action=No-op + - name: Deploy (or user defined action) Server Profile + cisco.intersight.intersight_rest_api: + <<: *api_info + resource_path: /server/Profiles + query_params: + $filter: "Name eq '{{ profile_name }}'" + api_body: { + "Action": "{{ profile_action | default('Deploy') }}" + } + delegate_to: localhost diff --git a/ansible_collections/cisco/intersight/playbooks/derive_profiles.yml b/ansible_collections/cisco/intersight/playbooks/derive_profiles.yml new file mode 100644 index 00000000..9f8ef51f --- /dev/null +++ b/ansible_collections/cisco/intersight/playbooks/derive_profiles.yml @@ -0,0 +1,72 @@ +--- +# +# include_tasks for deriving profiles from a template +# +# Get the Organization Moid +- name: "Get {{ template_name }}_DERIVED-{{ item }} Profile Moid" + intersight_rest_api: + api_private_key: "{{ api_private_key | default(omit) }}" + api_key_id: "{{ api_key_id | default(omit) }}" + api_uri: "{{ api_uri | default(omit) }}" + validate_certs: "{{ validate_certs | default(omit) }}" + state: "{{ state | default(omit) }}" + resource_path: /server/Profiles + query_params: + $filter: "Name eq '{{ template_name }}_DERIVED-{{ item }}'" + register: profile_resp +# Derive profiles from template (if profiles don't already exist) +- name: "POST to derive {{ template_name }}_DERIVED-{{ item }}" + intersight_rest_api: + api_private_key: "{{ api_private_key | default(omit) }}" + api_key_id: "{{ api_key_id | default(omit) }}" + api_uri: "{{ api_uri | default(omit) }}" + validate_certs: "{{ validate_certs | default(omit) }}" + state: "{{ state | default(omit) }}" + resource_path: /bulk/MoCloners + update_method: post + api_body: { + "Sources": [ + { + "ClassId": "mo.MoRef", + "ObjectType": "server.ProfileTemplate", + "Moid": "{{ template_resp.api_response.Moid }}" + } + ], + "Targets": [ + { + "Name": "{{ template_name }}_DERIVED-{{ item }}", + "ObjectType": "server.Profile", + "Organization": { + "Moid": "{{ org_resp.api_response.Moid }}" + }, + "ClassId": "server.Profile" + } + ] + } + when: profile_resp.api_response is not defined or not profile_resp.api_response +# POST updates to derived profiles if template was changed +- name: "POST to update {{ template_name }}_DERIVED-{{ item }}" + intersight_rest_api: + api_private_key: "{{ api_private_key | default(omit) }}" + api_key_id: "{{ api_key_id | default(omit) }}" + api_uri: "{{ api_uri | default(omit) }}" + validate_certs: "{{ validate_certs | default(omit) }}" + state: "{{ state | default(omit) }}" + resource_path: /bulk/MoMergers + update_method: post + api_body: { + "Sources": [ + { + "ObjectType": "server.ProfileTemplate", + "Moid": "{{ template_resp.api_response.Moid }}" + } + ], + "Targets": [ + { + "ObjectType": "server.Profile", + "Moid": "{{ profile_resp.api_response.Moid }}" + } + ], + "MergeAction":"Replace" + } + when: profile_resp.api_response and template_resp.changed diff --git a/ansible_collections/cisco/intersight/playbooks/devnet_inventory b/ansible_collections/cisco/intersight/playbooks/devnet_inventory new file mode 100644 index 00000000..aa3b889a --- /dev/null +++ b/ansible_collections/cisco/intersight/playbooks/devnet_inventory @@ -0,0 +1,23 @@ +[Intersight_HX] +# Note: at least one host (e.g., sjc07-r13-501) must be present for update_*_inventory.yml to work +sjc07-r13-501 +sjc07-r13-503 + +[Intersight_Servers] +C220M5-WZP23230LJ6 server_moid=5f0736dc6176752d37dbe9f4 model=UCSC-C220-M5SX +C220M5-WZP23230LJC server_moid=5ec59a786176752d377205f1 model=UCSC-C220-M5SX +C240M4-FCH1906V37P 
server_moid=5e8c974d6176752d332f44c9 model=UCSC-C240-M4S2 +C220-FCH2050V0LB server_moid=5dee9ce46176752d332eb867 model=UCSC-C220-M4L + +[Intersight:children] +Intersight_HX +Intersight_Servers + +[all:vars] +api_private_key=~/Downloads/DevNetSecretKey.txt +api_key_id=596cc79e5d91b400010d15ad/5db71f977564612d30cc3860/5f0f42d47564612d3363b87b +organization=DevNet +boot_order_policy=COS-Boot +local_user_policy=devnet-guest-admin +ntp_policy=lab-ntp +virtual_media_policy=COS-VM diff --git a/ansible_collections/cisco/intersight/playbooks/example_hx_host_vars b/ansible_collections/cisco/intersight/playbooks/example_hx_host_vars new file mode 100644 index 00000000..34ea804d --- /dev/null +++ b/ansible_collections/cisco/intersight/playbooks/example_hx_host_vars @@ -0,0 +1,91 @@ +# HX Cluster Profile Settings +intersight_org_name: Test-Org +hx_cluster_name: M5-Hybrid +hx_mgmt_platform: FI +hx_hypervisor_type: ESXi +hxdp_version: 4.5(1a) +ucs_firmware_version: 4.2(1a) +hx_mgmt_mac_prefix: 00:25:B5:7F +#hx_replication_factor: 3 +#hx_vdi_optimization: false +hx_disk_cleanup: true +#hx_laz_autoconfig: false + +# VCenter Settings +hx_vcenter_hostname: vcenter.hx.lab.cisco.com +hx_vcenter_username: administrator@vsphere.local +hx_vcenter_datacenter: Datacenter + +# HX Credentials +hx_hypervisor_admin: root +hx_hypervisor_factory_password: true +#hx_hypervisor_password: +#hx_dp_root_password: +#hx_vcenter_password: + +# HX Network Services Settings +hx_sys_config_timezone: America/Los_Angeles +hx_sys_config_dns_servers: + - 10.29.133.110 +hx_sys_config_ntp_servers: + - ntp1.hx.lab.cisco.com + - ntp2.hx.lab.cisco.com +hx_sys_config_dns_domain: hx.lab.cisco.com + +# HX Networking Settings +hx_mgmt_ip: 10.29.133.237 +hx_mgmt_vm_ip_start: 10.29.133.238 +hx_mgmt_vm_ip_end: 10.29.133.241 +hx_mgmt_netmask: 255.255.255.0 +hx_mgmt_gateway: 10.29.133.1 +hx_jumbo_frames: true +hx_mgmt_vlan_name: hx-mgmt-133 +hx_mgmt_vlan_id: 133 +hx_migration_vlan_name: vmotion-200 +hx_migration_vlan_id: 200 +hx_data_vlan_name: storage-51 +hx_data_vlan_id: 51 +#hx_vm_vlan_name: vm-network-100 +#hx_vm_vlan: 100 +hx_guest_vm_vlans: + - {"Name": vm-network-100, "VlanId": 100} + - {"Name": vm-network-101, "VlanId": 101} + +# HX Auto Support Settings +#hx_auto_support_enable: false +hx_auto_support_receipient: beveritt@cisco.com + +# HX Proxy Settings +hx_proxy_setting_hostname: proxy-wsa.esl.cisco.com +hx_proxy_setting_port: 80 + +# FC Settings +hx_vsan_a_name: vsan-10 +hx_vsan_a_id: 10 +hx_vsan_b_name: vsan-20 +hx_vsan_b_id: 20 +hx_fc_wwxn_range_start: 20:00:00:25:B5:7F +hx_fc_wwxn_range_end: 20:00:00:25:B5:7F + +# iSCSI Settings +hx_iscsi_vlan_a_name: iscsi-110 +hx_iscsi_vlan_a_id: 110 +hx_iscsi_vlan_b_name: iscsi-111 +hx_iscsi_vlan_b_id: 111 + +# HX Node Settings +hx_node_profile_prefix: hx220m5 +esx_mgmt_ip_start: 10.29.133.246 +esx_mgmt_ip_end: 10.29.133.249 +ucs_kvm_start_ip: 10.29.133.242 +ucs_kvm_end_ip: 10.29.133.245 +ucs_kvm_gateway: 10.29.133.1 +ucs_kvm_netmask: 255.255.255.0 +hx_mac_start: 00:25:B5:7F +hx_mac_end: 00:25:B5:7F + +hx_servers: + - SJC2-151-K27-6332-9 + - SJC2-151-K27-6332-10 + - SJC2-151-K27-6332-11 + - SJC2-151-K27-6332-12
\ No newline at end of file diff --git a/ansible_collections/cisco/intersight/playbooks/example_imm_inventory b/ansible_collections/cisco/intersight/playbooks/example_imm_inventory new file mode 100644 index 00000000..fcbd3583 --- /dev/null +++ b/ansible_collections/cisco/intersight/playbooks/example_imm_inventory @@ -0,0 +1,15 @@ +[Intersight_Servers] +SJC07-R14-FI-1-1-7 +SJC07-R14-FI-1-1-8 + +# Examples can use Intersight or Intersight_Servers host group +[Intersight:children] +Intersight_Servers + +# For examples that use localhost, all:vars allows key lookup +[all:vars] +api_key_id=596cc79e5d91b400010d15ad/5db71f977564612d30cc3860/5e9217a57564612d302f475b +# Policies to use with profiles +boot_order_policy=tf-module-boot-policy +imc_access_policy=tf-module-SJC07-R14-15-access +lan_connectivity_policy=tf-module-lan-connectivity-policy
\ No newline at end of file diff --git a/ansible_collections/cisco/intersight/playbooks/example_inventory b/ansible_collections/cisco/intersight/playbooks/example_inventory new file mode 100644 index 00000000..af8c4100 --- /dev/null +++ b/ansible_collections/cisco/intersight/playbooks/example_inventory @@ -0,0 +1,14 @@ +[Intersight_HX] +# Note: at least one host (e.g., sjc07-r13-501) must be present for update_*_inventory.yml to work +sjc07-r13-501 +sjc07-r13-503 + +[Intersight_Servers] + +[Intersight:children] +Intersight_HX +Intersight_Servers + +[all:vars] +api_private_key=~/Downloads/SecretKey.txt +api_key_id=596cc79e5d91b400010d15ad/5f0ce0ad7564612d3311a1f3/5f0ea8eb7564612d334ccb5a diff --git a/ansible_collections/cisco/intersight/playbooks/firmware_direct_download.yml b/ansible_collections/cisco/intersight/playbooks/firmware_direct_download.yml new file mode 100644 index 00000000..4277f930 --- /dev/null +++ b/ansible_collections/cisco/intersight/playbooks/firmware_direct_download.yml @@ -0,0 +1,85 @@ +--- +# +# The hosts group used is provided by the group variable or defaulted to 'Intersight_Servers'. +# You can specify a specific host (or host group) on the command line: +# ansible-playbook ... -e group=<your host group> +# e.g., ansible-playbook server_profiles.yml -e group=TME_Demo +# +- hosts: "{{ group | default('Intersight_Servers') }}" + connection: local + collections: + - cisco.intersight + gather_facts: false + vars: + # Create an anchor for api_info that can be used throughout the file + api_info: &api_info + # if api_key vars are omitted, INTERSIGHT_API_KEY_ID, INTERSIGHT_API_PRIVATE_KEY, + # and INTERSIGHT_API_URI environment variables used for API key data + api_private_key: "{{ api_private_key | default(omit) }}" + api_key_id: "{{ api_key_id | default(omit) }}" + api_uri: "{{ api_uri | default(omit) }}" + validate_certs: "{{ validate_certs | default(omit) }}" + # Firmware Version + fw_version: 4.2(2d) + tasks: + # Set the distributable type based on the management mode and server type + - set_fact: + dist_type: IMMHOST + when: mode == 'Intersight' or mode == 'IntersightStandalone' + - set_fact: + dist_type: UMMBLADE + when: mode == 'UCSM' and object_type == 'Blade' + - set_fact: + dist_type: UMMRACK + when: mode == 'UCSM' and object_type == 'RackUnit' + # Get a user defined FW version + - name: Get Moid of user defined FW version + intersight_rest_api: + <<: *api_info + resource_path: /firmware/Distributables + query_params: + $filter: "SupportedModels eq '{{ model }}' and Version eq '{{ fw_version }}' and Tags.Key eq 'cisco.meta.distributabletype' and Tags.Value eq '{{ dist_type }}' and Tags.Key eq 'cisco.meta.repositorytype' and Tags.Value eq 'IntersightCloud'" + delegate_to: localhost + register: fw_resp + # Update server firmware with a post based on server moid + - name: Update server firmware + intersight_rest_api: + <<: *api_info + resource_path: /firmware/Upgrades + query_params: + $filter: "Server.Moid eq '{{ server_moid }}'" + update_method: post + api_body: { + "DirectDownload": { + "Upgradeoption": "upgrade_mount_only" + }, + "Distributable": { + "Moid": "{{ fw_resp.api_response.Moid }}" + }, + "Server": { + "Moid": "{{ server_moid }}", + "ObjectType": "compute.{{ object_type }}" + }, + "UpgradeType": "direct_upgrade", + "SkipEstimateImpact": true + } + delegate_to: localhost + register: update_resp + when: + - server_moid is defined + - fw_resp.api_response.Moid is defined + # Wait for download/update to complete + - name: Check firmware download/update 
status + intersight_rest_api: + <<: *api_info + resource_path: /firmware/UpgradeStatuses + query_params: + $filter: "Moid eq '{{ update_resp.api_response.UpgradeStatus.Moid }}'" + delegate_to: localhost + register: status_resp + until: status_resp.api_response.Overallstatus == 'pending' or status_resp.api_response.Overallstatus == 'success' + # 60 minutes to allow download/update to complete + retries: 60 + delay: 60 + when: + - update_resp.api_response is defined diff --git a/ansible_collections/cisco/intersight/playbooks/hcl_status.yml b/ansible_collections/cisco/intersight/playbooks/hcl_status.yml new file mode 100644 index 00000000..7281c259 --- /dev/null +++ b/ansible_collections/cisco/intersight/playbooks/hcl_status.yml @@ -0,0 +1,46 @@ +--- +# +# The hosts group used is provided by the group variable or defaulted to 'Intersight_Servers'. +# You can specify a specific host (or host group) on the command line: +# ansible-playbook ... -e group=<your host group> +# e.g., ansible-playbook server_profiles.yml -e group=TME_Demo +# +- hosts: "{{ group | default('Intersight_Servers') }}" + collections: + - cisco.intersight + connection: local + gather_facts: false + vars: + # Create an anchor for api_info that can be used throughout the file + api_info: &api_info + api_private_key: "{{ api_private_key }}" + api_key_id: "{{ api_key_id }}" + api_uri: "{{ api_uri | default(omit) }}" + validate_certs: "{{ validate_certs | default(omit) }}" + state: "{{ state | default(omit) }}" + tasks: + # Get HclStatus + - name: Get HCL Status for Server + intersight_rest_api: + <<: *api_info + resource_path: /cond/HclStatuses + query_params: + $filter: "ManagedObject.Moid eq '{{ server_moid }}'" + delegate_to: localhost + register: hcl_resp + when: + - server_moid is defined + # Create .csv file with version and status information + - copy: + content: | + Name, FW version, OS vendor, OS version, HW status, SW status, Overall Status + {% for host in hostvars %} + {% set vars = hostvars[host|string] %} + {% if vars.hcl_resp.api_response is defined %} + {{ vars.inventory_hostname }}, {{ vars.hcl_resp.api_response.HclFirmwareVersion }}, {{ vars.hcl_resp.api_response.HclOsVendor }}, {{ vars.hcl_resp.api_response.HclOsVersion }}, {{ vars.hcl_resp.api_response.HardwareStatus }}, {{ vars.hcl_resp.api_response.SoftwareStatus }}, {{ vars.hcl_resp.api_response.Status }} {{ vars.hcl_resp.api_response.ServerReason }} + {% endif %} + {% endfor %} + dest: /tmp/hcl_status.csv + backup: false + run_once: true + delegate_to: localhost diff --git a/ansible_collections/cisco/intersight/playbooks/hyperflex_cluster_profiles.yml b/ansible_collections/cisco/intersight/playbooks/hyperflex_cluster_profiles.yml new file mode 100644 index 00000000..d70523db --- /dev/null +++ b/ansible_collections/cisco/intersight/playbooks/hyperflex_cluster_profiles.yml @@ -0,0 +1,172 @@ +--- +# +# Configure HyperFlex Cluster Profiles +# +# The hosts group used is provided by the group variable or defaulted to 'Intersight_HX'. +# You can specify a specific host (or host group) on the command line: +# ansible-playbook ... 
-e group=<your host group> +# e.g., ansible-playbook server_profiles.yml -e group=TME_Demo +# +- hosts: "{{ group | default('Intersight_HX') }}" + connection: local + gather_facts: false + vars: + # If your inventory or host/group_vars don't specify required api key information, you can set directly below: + # api_private_key: ~/Downloads/SecretKey.txt + # api_key_id: 5a3404ac3768393836093cab/5b02fa7e6d6c356772394170/5b02fad36d6c356772394449 + vars_prompt: + + - name: "hx_vcenter_password" + prompt: "Enter the vCenter administrative password" + private: yes + confirm: yes + unsafe: yes + + - name: "hx_hypervisor_password" + prompt: "Enter the new ESXi nodes' administrative password" + private: yes + confirm: yes + unsafe: yes + + - name: "hx_dp_root_password" + prompt: "Enter the HyperFlex administrative password" + private: yes + confirm: yes + unsafe: yes + + - name: "execute_auto_support" + prompt: "Do you need to enable Auto Support settings? (yes/no)" + private: no + + - name: "execute_proxy" + prompt: "Do you need to configure proxy settings? (yes/no)" + private: no + + - name: "execute_iscsi" + prompt: "Do you need to configure additional vNICs for iSCSI settings? (yes/no)" + private: no + + - name: "execute_fc" + prompt: "Do you need to configure additional vHBAs for FC settings? (yes/no)" + private: no + + tasks: + # Intersight Org + - import_role: + name: policies/hyperflex_policies/intersight_org + vars: + org_name: "{{ intersight_org_name }}" + tags: ['org'] + # Cluster Profile + - import_role: + name: policies/hyperflex_policies/cluster_profile + vars: + hx_cluster_profile: "{{ hx_cluster_name }}" + tags: ['cluster_profile'] + # Software Version + - import_role: + name: policies/hyperflex_policies/software_version + vars: + hx_software_policy: "{{ hx_cluster_name }}-software-version-policy" + tags: ['software'] + # DNS + - import_role: + name: policies/hyperflex_policies/sys_config + vars: + hx_sys_config_policy: "{{ hx_cluster_name }}-sys-config-policy" + tags: ['dns'] + # Security + - import_role: + name: policies/hyperflex_policies/local_credential + vars: + hx_local_credential_policy: "{{ hx_cluster_name }}-local-credential-policy" + tags: ['security'] + # vCenter + - import_role: + name: policies/hyperflex_policies/vcenter + vars: + hx_vcenter_config_policy: "{{ hx_cluster_name }}-vcenter-config-policy" + tags: ['vcenter'] + # Storage Config + - import_role: + name: policies/hyperflex_policies/cluster_storage + vars: + hx_cluster_storage_policy: "{{ hx_cluster_name }}-cluster-storage-policy" + tags: ['storage'] + # Auto Support + - import_role: + name: policies/hyperflex_policies/auto_support + vars: + hx_auto_support_policy: "{{ hx_cluster_name }}-auto-support-policy" + hx_auto_support_enable: true + when: execute_auto_support|bool + tags: ['autosupport'] + # Proxy + - import_role: + name: policies/hyperflex_policies/proxy + vars: + hx_proxy_setting_policy: "{{ hx_cluster_name }}-proxy-setting-policy" + when: execute_proxy|bool + tags: ['proxy'] + # FC + - import_role: + name: policies/hyperflex_policies/fc + vars: + hx_fc_setting_policy: "{{ hx_cluster_name }}-ext-fc-storage-policy" + hx_fc_setting_enable: true + when: execute_fc|bool + tags: ['fc'] + # iSCSI + - import_role: + name: policies/hyperflex_policies/iscsi + vars: + hx_iscsi_setting_policy: "{{ hx_cluster_name }}-ext-iscsi-storage-policy" + hx_iscsi_setting_enable: true + when: execute_iscsi|bool + tags: ['iscsi'] + # Network Config + - import_role: + name: policies/hyperflex_policies/cluster_network + vars: 
+ hx_cluster_network_policy: "{{ hx_cluster_name }}-cluster-network-policy" + tags: ['network'] + # Node IP and Hostname + - import_role: + name: policies/hyperflex_policies/node_config + vars: + hx_node_config_policy: "{{ hx_cluster_name }}-node-config-policy" + tags: ['nodes'] + + - debug: + msg: "All policies and the HyperFlex cluster profile have been created." + + - name: "Prompt to assign" + pause: + prompt: "Proceed with physical node assignment? (yes/no)" + echo: yes + register: assign_response + run_once: true + tags: ['prompt_assign'] + + # Assign servers to cluster profile and set deployment action + - import_role: + name: policies/hyperflex_policies/node_profiles + tags: ['assign'] + when: assign_response.user_input|bool + + - name: "Prompt to deploy" + pause: + prompt: "Proceed with cluster deployment? (yes/no)" + echo: yes + register: deploy_response + run_once: true + tags: ['prompt_deploy'] + + # Set cluster profile deployment action + - import_role: + name: policies/hyperflex_policies/deploy + tags: ['deploy'] + when: deploy_response.user_input|bool + + - debug: + msg: "HyperFlex cluster creation is complete." diff --git a/ansible_collections/cisco/intersight/playbooks/hyperflex_edge_cluster_profiles.yml b/ansible_collections/cisco/intersight/playbooks/hyperflex_edge_cluster_profiles.yml new file mode 100644 index 00000000..c0144a0a --- /dev/null +++ b/ansible_collections/cisco/intersight/playbooks/hyperflex_edge_cluster_profiles.yml @@ -0,0 +1,148 @@ +--- +# +# Configure HyperFlex Edge Cluster Profiles +# +# The hosts group used is provided by the group variable or defaulted to 'Intersight_HX'. +# You can specify a specific host (or host group) on the command line: +# ansible-playbook ... -e group=<your host group> +# e.g., ansible-playbook server_profiles.yml -e group=TME_Demo +# +- hosts: "{{ group | default('Intersight_HX') }}" + connection: local + gather_facts: false + vars: + # If your inventory or host/group_vars don't specify required api key information, you can set directly below: + # api_private_key: ~/Downloads/SecretKey.txt + # api_key_id: 5a3404ac3768393836093cab/5b02fa7e6d6c356772394170/5b02fad36d6c356772394449 + vars_prompt: + + - name: "hx_vcenter_password" + prompt: "Enter the vCenter administrative password" + private: yes + confirm: yes + unsafe: yes + + - name: "hx_hypervisor_password" + prompt: "Enter the new ESXi nodes' administrative password" + private: yes + confirm: yes + unsafe: yes + + - name: "hx_dp_root_password" + prompt: "Enter the HyperFlex administrative password" + private: yes + confirm: yes + unsafe: yes + + - name: "execute_auto_support" + prompt: "Do you need to enable Auto Support settings? (yes/no)" + private: no + + - name: "execute_proxy" + prompt: "Do you need to configure proxy settings? 
(yes/no)" + private: no + + tasks: + # Intersight Org + - import_role: + name: policies/hyperflex_policies/intersight_org + vars: + org_name: "{{ intersight_org_name }}" + tags: ['org'] + # Cluster Profile + - import_role: + name: policies/hyperflex_policies/edge_cluster_profile + vars: + hx_cluster_profile: "{{ hx_cluster_name }}" + tags: ['cluster_profile'] + # Software Version + - import_role: + name: policies/hyperflex_policies/edge_software_version + vars: + hx_software_policy: "{{ hx_cluster_name }}-software-version-policy" + tags: ['software'] + # DNS + - import_role: + name: policies/hyperflex_policies/sys_config + vars: + hx_sys_config_policy: "{{ hx_cluster_name }}-sys-config-policy" + tags: ['dns'] + # Security + - import_role: + name: policies/hyperflex_policies/local_credential + vars: + hx_local_credential_policy: "{{ hx_cluster_name }}-local-credential-policy" + tags: ['security'] + # vCenter + - import_role: + name: policies/hyperflex_policies/vcenter + vars: + hx_vcenter_config_policy: "{{ hx_cluster_name }}-vcenter-config-policy" + tags: ['vcenter'] + # Storage Config + - import_role: + name: policies/hyperflex_policies/edge_cluster_storage + vars: + hx_cluster_storage_policy: "{{ hx_cluster_name }}-cluster-storage-policy" + tags: ['storage'] + # Auto Support + - import_role: + name: policies/hyperflex_policies/auto_support + vars: + hx_auto_support_policy: "{{ hx_cluster_name }}-auto-support-policy" + hx_auto_support_enable: true + when: execute_auto_support|bool + tags: ['autosupport'] + # Proxy + - import_role: + name: policies/hyperflex_policies/proxy + vars: + hx_proxy_setting_policy: "{{ hx_cluster_name }}-proxy-setting-policy" + when: execute_proxy|bool + tags: ['proxy'] + # Network Config + - import_role: + name: policies/hyperflex_policies/edge_cluster_network + vars: + hx_cluster_network_policy: "{{ hx_cluster_name }}-cluster-network-policy" + tags: ['network'] + # Node IP and Hostname + - import_role: + name: policies/hyperflex_policies/node_config + vars: + hx_node_config_policy: "{{ hx_cluster_name }}-node-config-policy" + tags: ['nodes'] + + - debug: + msg: "All policies and the HyperFlex cluster profile have been created." + + - name: "Prompt to assign" + pause: + prompt: "Proceed with physical node assignment? (yes/no)" + echo: yes + register: assign_response + run_once: true + tags: ['prompt_assign'] + + # Assign servers to cluster profile and set deployment action + - import_role: + name: policies/hyperflex_policies/node_profiles + tags: ['assign'] + when: assign_response.user_input|bool + + - name: "Prompt to deploy" + pause: + prompt: "Proceed with cluster deployment? (yes/no)" + echo: yes + register: deploy_response + run_once: true + tags: ['prompt_deploy'] + + # Set cluster profile deployment action + - import_role: + name: policies/hyperflex_policies/deploy + tags: ['deploy'] + when: deploy_response.user_input|bool + + - debug: + msg: "HyperFlex Edge cluster creation is complete." 
diff --git a/ansible_collections/cisco/intersight/playbooks/intersight_boot_order_policy.yml b/ansible_collections/cisco/intersight/playbooks/intersight_boot_order_policy.yml new file mode 100644 index 00000000..ea20cc06 --- /dev/null +++ b/ansible_collections/cisco/intersight/playbooks/intersight_boot_order_policy.yml @@ -0,0 +1,32 @@ +--- +# Example Playbook: cisco.intersight.intersight_boot_order_policy +# Runs on localhost since policies are only configured once +# Author: Tse Kai "Kevin" Chan (@BrightScale) +- hosts: localhost + connection: local + collections: + - cisco.intersight + gather_facts: false + vars: + # Create an anchor for api_info that can be used throughout the file + api_info: &api_info + api_private_key: "{{ api_private_key }}" + api_key_id: "{{ api_key_id }}" + api_uri: "{{ api_uri | default(omit) }}" + tasks: + - name: Configure Boot Order Policy + intersight_boot_order_policy: + <<: *api_info + organization: "{{ organization | default(omit) }}" + name: COS-Boot + description: Boot Order policy for lab use + tags: + - Key: Site + Value: RCDN + configured_boot_mode: Legacy + boot_devices: + - device_type: Local Disk + device_name: Boot-Lun + controller_slot: MRAID + - device_type: Virtual Media + device_name: vmedia diff --git a/ansible_collections/cisco/intersight/playbooks/intersight_domain_profile.yml b/ansible_collections/cisco/intersight/playbooks/intersight_domain_profile.yml new file mode 100644 index 00000000..f8c5eae9 --- /dev/null +++ b/ansible_collections/cisco/intersight/playbooks/intersight_domain_profile.yml @@ -0,0 +1,140 @@ +--- +# +# Configure UCS Domain Profiles +# +- hosts: localhost + connection: local + collections: + - cisco.intersight + gather_facts: false + vars: + # Create an anchor for api_info that can be used throughout the file + api_info: &api_info + # if api_key vars are omitted, INTERSIGHT_API_KEY_ID, INTERSIGHT_API_PRIVATE_KEY, + # and INTERSIGHT_API_URI environment variables used for API key data + api_private_key: "{{ api_private_key | default(omit) }}" + api_key_id: "{{ api_key_id | default(omit) }}" + api_uri: "{{ api_uri | default(omit) }}" + validate_certs: "{{ validate_certs | default(omit) }}" + # Domain Profile name + profile_name: emulator + org_name: dsoper-DevNet + port_policy: server-1-6-eth-pc-47-48 + qos_policy: required-qos + # Fabric Intersight A and B Serial Numbers + fia_sn: FDO23021WJ6 + fib_sn: FDO23070UA2 + tasks: + # Get the Organization Moid + - name: "Get {{ org_name }} Organization Moid" + intersight_rest_api: + <<: *api_info + resource_path: /organization/Organizations + query_params: + $filter: "Name eq '{{ org_name }}'" + register: org_resp + # Get the Port Policy + - name: "Get {{ port_policy }} Port Policy Moid" + intersight_rest_api: + <<: *api_info + resource_path: /fabric/PortPolicies + query_params: + $filter: "Name eq '{{ port_policy }}'" + register: port_resp + # Get the QoS Policy + - name: "Get {{ qos_policy }} Qos Policy Moid" + intersight_rest_api: + <<: *api_info + resource_path: /fabric/SystemQosPolicies + query_params: + $filter: "Name eq '{{ qos_policy }}'" + register: qos_resp + # Get FI A Moid + - name: "Get FI A {{ fia_sn }} Moid" + intersight_rest_api: + <<: *api_info + resource_path: /network/Elements + query_params: + $filter: "Serial eq '{{ fia_sn }}'" + register: fia_resp + # Get FI B Moid + - name: "Get FI B {{ fib_sn }} Moid" + intersight_rest_api: + <<: *api_info + resource_path: /network/Elements + query_params: + $filter: "Serial eq '{{ fib_sn }}'" + register: fib_resp + # 
Config Domain (SwitchCluster) Profile + - name: "Configure {{ profile_name }} Domain Profile" + intersight_rest_api: + <<: *api_info + state: "{{ state | default('present') }}" + resource_path: /fabric/SwitchClusterProfiles + query_params: + $filter: "Name eq '{{ profile_name }}'" + api_body: { + "Name": "{{ profile_name }}", + "Organization": { + "Moid": "{{ org_resp.api_response.Moid }}" + } + } + register: profile_resp + # Config Switch Profile A with Policy Bucket + # Command line arg -e profile_action=Unassign can be used to unassign the profile + # Command line arg -e profile_action=Deploy can be used to deploy the profile + - name: "Configure {{ profile_name }}-A Switch Profile" + intersight_rest_api: + <<: *api_info + resource_path: /fabric/SwitchProfiles + query_params: + $filter: "Name eq '{{ profile_name }}-A'" + api_body: { + "Name": "{{ profile_name }}-A", + "SwitchClusterProfile": { + "Moid": "{{ profile_resp.api_response.Moid }}" + }, + "PolicyBucket": [ + { + "Moid": "{{ port_resp.api_response.Moid }}", + "ObjectType": "fabric.PortPolicy" + }, + { + "Moid": "{{ qos_resp.api_response.Moid }}", + "ObjectType": "fabric.SystemQosPolicy" + } + ], + "AssignedSwitch": { + "Moid": "{{ fia_resp.api_response.Moid }}" + }, + "Action": "{{ profile_action | default('No-op') }}" + } + when: profile_resp.api_response is defined and profile_resp.api_response + # Config Switch Profile B with Policy Bucket + - name: "Configure {{ profile_name }}-B Switch Profile" + intersight_rest_api: + <<: *api_info + resource_path: /fabric/SwitchProfiles + query_params: + $filter: "Name eq '{{ profile_name }}-B'" + api_body: { + "Name": "{{ profile_name }}-B", + "SwitchClusterProfile": { + "Moid": "{{ profile_resp.api_response.Moid }}" + }, + "PolicyBucket": [ + { + "Moid": "{{ port_resp.api_response.Moid }}", + "ObjectType": "fabric.PortPolicy" + }, + { + "Moid": "{{ qos_resp.api_response.Moid }}", + "ObjectType": "fabric.SystemQosPolicy" + } + ], + "AssignedSwitch": { + "Moid": "{{ fib_resp.api_response.Moid }}" + }, + "Action": "{{ profile_action | default('No-op') }}" + } + when: profile_resp.api_response is defined and profile_resp.api_response diff --git a/ansible_collections/cisco/intersight/playbooks/intersight_imc_access_policy.yml b/ansible_collections/cisco/intersight/playbooks/intersight_imc_access_policy.yml new file mode 100644 index 00000000..070d0027 --- /dev/null +++ b/ansible_collections/cisco/intersight/playbooks/intersight_imc_access_policy.yml @@ -0,0 +1,25 @@ +--- +# Example Playbook: cisco.intersight.intersight_..._policy +# Runs on localhost since policies are only configured once +- hosts: localhost + connection: local + collections: + - cisco.intersight + gather_facts: false + vars: + # Create an anchor for api_info that can be used throughout the file + api_info: &api_info + api_private_key: "{{ api_private_key }}" + api_key_id: "{{ api_key_id }}" + api_uri: "{{ api_uri | default(omit) }}" + tasks: + - name: Configure IMC Access policy + intersight_imc_access_policy: + <<: *api_info + name: "{{ imc_access_name | default('sjc02-d23-access') }}" + tags: + - Key: Site + Value: SJC02 + description: Updated IMC access for SJC labs + vlan_id: "{{ imc_access_vlan | default(131) }}" + ip_pool: "{{ ip_pool | default('sjc02-d23-ext-mgmt') }}" diff --git a/ansible_collections/cisco/intersight/playbooks/intersight_lan_connectivity_policy.yml b/ansible_collections/cisco/intersight/playbooks/intersight_lan_connectivity_policy.yml new file mode 100644 index 00000000..91c1446e --- /dev/null +++ 
b/ansible_collections/cisco/intersight/playbooks/intersight_lan_connectivity_policy.yml @@ -0,0 +1,134 @@ +--- +# +# Configure LAN Connectivity Policy +# +- hosts: localhost + connection: local + collections: + - cisco.intersight + gather_facts: false + vars: + # Create an anchor for api_info that can be used throughout the file + api_info: &api_info + # if api_key vars are omitted, INTERSIGHT_API_KEY_ID, INTERSIGHT_API_PRIVATE_KEY, + # and INTERSIGHT_API_URI environment variables used for API key data + api_private_key: "{{ api_private_key | default(omit) }}" + api_key_id: "{{ api_key_id | default(omit) }}" + api_uri: "{{ api_uri | default(omit) }}" + validate_certs: "{{ validate_certs | default(omit) }}" + # LAN Connectivity Policy name + lcp_name: SJC07-R14-R15-lan-conn + eth_net_group: sjc07-248-net-group + eth_net_control: default-eth-net-control + eth_qos: default-eth-qos + eth_adapter: eth-adapter + mac_pool: sjc07-de31-mac + vnic_name: eth0 + org_name: dsoper-DevNet + tasks: + # Get the Organization Moid + - name: "Get {{ org_name }} Organization Moid" + intersight_rest_api: + <<: *api_info + resource_path: /organization/Organizations + query_params: + $filter: "Name eq '{{ org_name }}'" + register: org_resp + # Get the Ethernet Network Group Policy + - name: "Get {{ eth_net_group }} Ethernet Network Group Policy Moid" + intersight_rest_api: + <<: *api_info + resource_path: /fabric/EthNetworkGroupPolicies + query_params: + $filter: "Name eq '{{ eth_net_group }}'" + register: eth_net_group_resp + # Get the Ethernet Network Control Policy + - name: "Get {{ eth_net_control }} Ethernet Network Control Policy Moid" + intersight_rest_api: + <<: *api_info + resource_path: /fabric/EthNetworkControlPolicies + query_params: + $filter: "Name eq '{{ eth_net_control }}'" + register: eth_net_control_resp + # Get the Ethernet QoS Policy + - name: "Get {{ eth_qos }} Ethernet QoS Policy Moid" + intersight_rest_api: + <<: *api_info + resource_path: /vnic/EthQosPolicies + query_params: + $filter: "Name eq '{{ eth_qos }}'" + register: eth_qos_resp + # Get the Ethernet Network Group Policy + - name: "Get {{ eth_adapter }} Ethernet Adapter Policy Moid" + intersight_rest_api: + <<: *api_info + resource_path: /vnic/EthAdapterPolicies + query_params: + $filter: "Name eq '{{ eth_adapter }}'" + register: eth_adapter_resp + # Get MAC Address Pool + - name: "Get {{ mac_pool }} MAC Address Pool Moid" + intersight_rest_api: + <<: *api_info + resource_path: /macpool/Pools + query_params: + $filter: "Name eq '{{ mac_pool }}'" + register: mac_resp + # Config LAN Connectivity Policy + - name: "Configure {{ lcp_name }} LAN Connectivity Policy" + intersight_rest_api: + <<: *api_info + state: "{{ state | default('present') }}" + resource_path: /vnic/LanConnectivityPolicies + query_params: + $filter: "Name eq '{{ lcp_name }}'" + api_body: { + "Name": "{{ lcp_name }}", + "Organization": { + "Moid": "{{ org_resp.api_response.Moid }}" + }, + "PlacementMode": "auto", + "TargetPlatform": "FIAttached" + } + register: lcp_resp + # Config vNIC with LAN Connectivity Policy + - name: "Configure {{ vnic_name }} vNIC" + intersight_rest_api: + <<: *api_info + resource_path: /vnic/EthIfs + query_params: + $filter: "Name eq '{{ vnic_name }}'" + api_body: { + "Name": "{{ vnic_name }}", + "MacAddressType": "POOL", + "MacPool": { + "Moid": "{{ mac_resp.api_response.Moid }}", + }, + "Placement": { + "SwitchId": "A", + "AutoSlotId": false, + "AutoPciLink": false + }, + "Cdn": { + "Source": "vnic" + }, + "FailoverEnabled": true, + 
"FabricEthNetworkGroupPolicy": [ + { + "Moid": "{{ eth_net_group_resp.api_response.Moid }}" + } + ], + "FabricEthNetworkControlPolicy": { + "Moid": "{{ eth_net_control_resp.api_response.Moid }}" + }, + "EthQosPolicy": { + "Moid": "{{ eth_qos_resp.api_response.Moid }}" + }, + "EthAdapterPolicy": { + "Moid": "{{ eth_adapter_resp.api_response.Moid }}" + }, + "LanConnectivityPolicy": { + "Moid": "{{ lcp_resp.api_response.Moid }}" + } + } + when: lcp_resp.api_response is defined and lcp_resp.api_response diff --git a/ansible_collections/cisco/intersight/playbooks/intersight_local_user_policy.yml b/ansible_collections/cisco/intersight/playbooks/intersight_local_user_policy.yml new file mode 100644 index 00000000..a387a95c --- /dev/null +++ b/ansible_collections/cisco/intersight/playbooks/intersight_local_user_policy.yml @@ -0,0 +1,55 @@ +--- +# Example Playbook: cisco.intersight.intersight_..._policy +# Runs on localhost since policies are only configured once +- hosts: localhost + connection: local + collections: + - cisco.intersight + gather_facts: false + vars: + # Create an anchor for api_info that can be used throughout the file + api_info: &api_info + # if api_key vars are omitted, INTERSIGHT_API_KEY_ID, INTERSIGHT_API_PRIVATE_KEY, + # and INTERSIGHT_API_URI environment variables used for API key data + api_private_key: "{{ api_private_key | default(omit) }}" + api_key_id: "{{ api_key_id | default(omit) }}" + api_uri: "{{ api_uri | default(omit) }}" + validate_certs: "{{ validate_certs | default(omit) }}" + state: "{{ state | default(omit) }}" + # + # Example using vault: + # 1. Place the vault password into a plain text file (this is the password for vault access - do not check this into any repos!) + # $ cat vault_password_file + # ... + # 2. Encrypt a string (e.g., 'notagoodpassword'). You will later decrypt using your vault password file + # $ ansible-vault encrypt_string --vault-id tme@vault_password_file 'notagoodpassword' --name 'vault_password' + # (response is the encrypting string) + # 3. Place the vault variable in your playbook (example below): + # 4. 
Run the playbook and supply the vault password file (used to decrypt the vaulted password in the playbook) + # $ ansible-playbook -i inventory --vault-id tme@vault_password_file intersight_local_user_policy.yml + # + vault_password: !vault | + $ANSIBLE_VAULT;1.2;AES256;tme + 36656264656638646566313633353832396138616264313032303433656636643638363864653936 + 6532646363303435633965383432633630306566323838640a363566376234303366313064306162 + 39326331373231643333616335393232353633393834653161633032383539383537656336666639 + 3635306535366233660a356235393664653538386136626439646137626531663135363636326131 + 3538 + tasks: + - name: Configure Local User policy + intersight_local_user_policy: + <<: *api_info + name: "{{ local_user_policy | default('guest-admin') }}" + tags: + - Key: username + Value: guest + description: Username guest with admin role + enforce_strong_password: true + enable_password_expiry: false + password_history: 5 + purge: true + always_update_password: true + local_users: + - username: guest + role: admin + password: "{{ vault_password }}" diff --git a/ansible_collections/cisco/intersight/playbooks/intersight_ntp_policy.yml b/ansible_collections/cisco/intersight/playbooks/intersight_ntp_policy.yml new file mode 100644 index 00000000..9f4661b8 --- /dev/null +++ b/ansible_collections/cisco/intersight/playbooks/intersight_ntp_policy.yml @@ -0,0 +1,27 @@ +--- +# Example Playbook: cisco.intersight.intersight_ntp_policy +# Runs on localhost since policies are only configured once +- hosts: localhost + connection: local + collections: + - cisco.intersight + gather_facts: false + vars: + # Create an anchor for api_info that can be used throughout the file + api_info: &api_info + api_private_key: "{{ api_private_key }}" + api_key_id: "{{ api_key_id }}" + api_uri: "{{ api_uri | default(omit) }}" + tasks: + - name: Configure NTP Policy + intersight_ntp_policy: + <<: *api_info + organization: "{{ organization | default(omit) }}" + name: lab-ntp + description: NTP policy for lab use + tags: + - Key: Site + Value: RCDN + ntp_servers: + - ntp.esl.cisco.com + timezone: America/Chicago diff --git a/ansible_collections/cisco/intersight/playbooks/intersight_port_policy.yml b/ansible_collections/cisco/intersight/playbooks/intersight_port_policy.yml new file mode 100644 index 00000000..83ba5a57 --- /dev/null +++ b/ansible_collections/cisco/intersight/playbooks/intersight_port_policy.yml @@ -0,0 +1,88 @@ +--- +# +# Configure Fabric Port Policies +# +- hosts: localhost + connection: local + collections: + - cisco.intersight + gather_facts: false + vars: + # Create an anchor for api_info that can be used throughout the file + api_info: &api_info + # if api_key vars are omitted, INTERSIGHT_API_KEY_ID, INTERSIGHT_API_PRIVATE_KEY, + # and INTERSIGHT_API_URI environment variables used for API key data + api_private_key: "{{ api_private_key | default(omit) }}" + api_key_id: "{{ api_key_id | default(omit) }}" + api_uri: "{{ api_uri | default(omit) }}" + validate_certs: "{{ validate_certs | default(omit) }}" + # Port Policy name + port_name: server-1-6-eth-pc-47-48 + org_name: dsoper-DevNet + tasks: + # Get the Organization Moid + - name: "Get {{ org_name }} Organization Moid" + intersight_rest_api: + <<: *api_info + resource_path: /organization/Organizations + query_params: + $filter: "Name eq '{{ org_name }}'" + register: org_resp + # Config Port Policy + - name: "Configure {{ port_name }} Port Policy" + intersight_rest_api: + <<: *api_info + state: "{{ state | default('present') }}" + resource_path: 
/fabric/PortPolicies + query_params: + $filter: "Name eq '{{ port_name }}'" + api_body: { + "Name": "{{ port_name }}", + "DeviceModel": "UCS-FI-6454", + "Organization": { + "Moid": "{{ org_resp.api_response.Moid }}" + } + } + register: port_resp + # Config Server Roles + - name: "Configure Server Roles" + intersight_rest_api: + <<: *api_info + resource_path: /fabric/ServerRoles + query_params: + $filter: "PortPolicy.Moid eq '{{ port_resp.api_response.Moid }}' and PortId eq {{ item }}" + api_body: { + "Fec": "Auto", + "PortId": "{{ item }}", + "PortPolicy": { + "Moid": "{{ port_resp.api_response.Moid }}" + }, + "SlotId": 1 + } + loop: "{{ range(1, 6+1) | list }}" + when: port_resp.api_response is defined and port_resp.api_response + # Config Uplink Port Channel Roles + - name: "Configure Uplink Port Channel Roles" + intersight_rest_api: + <<: *api_info + resource_path: /fabric/UplinkPcRoles + query_params: + $filter: "PortPolicy.Moid eq '{{ port_resp.api_response.Moid }}'" + api_body: { + "AdminSpeed": "Auto", + "PcId": 47, + "PortPolicy": { + "Moid": "{{ port_resp.api_response.Moid }}" + }, + "Ports": [ + { + "PortId": 47, + "SlotId": 1 + }, + { + "PortId": 48, + "SlotId": 1 + } + ] + } + when: port_resp.api_response is defined and port_resp.api_response diff --git a/ansible_collections/cisco/intersight/playbooks/intersight_server_profile.yml b/ansible_collections/cisco/intersight/playbooks/intersight_server_profile.yml new file mode 100644 index 00000000..27787344 --- /dev/null +++ b/ansible_collections/cisco/intersight/playbooks/intersight_server_profile.yml @@ -0,0 +1,51 @@ +--- +# +# Configure Server Profiles +# +# The hosts group used is provided by the group variable or defaulted to 'Intersight_Servers'. +# You can specify a specific host (or host group) on the command line: +# ansible-playbook ... 
-e group=<your host group> +# e.g., ansible-playbook server_profiles.yml -e group=TME_Demo +# +- hosts: "{{ group | default('Intersight_Servers') }}" + connection: local + collections: + - cisco.intersight + gather_facts: false + vars: + # Create an anchor for api_info that can be used throughout the file + api_info: &api_info + # if api_key vars are omitted, INTERSIGHT_API_KEY_ID, INTERSIGHT_API_PRIVATE_KEY, + # and INTERSIGHT_API_URI environment variables used for API key data + api_private_key: "{{ api_private_key | default(omit) }}" + api_key_id: "{{ api_key_id | default(omit) }}" + api_uri: "{{ api_uri | default(omit) }}" + validate_certs: "{{ validate_certs | default(omit) }}" + state: "{{ state | default(omit) }}" + # Server Profile name default + profile_name: "SP-{{ inventory_hostname }}" + tasks: + # + # Configure profiles specific to server (run for each server in the inventory) + # + - set_fact: + mode: Standalone + when: mode is not defined or mode == 'IntersightStandalone' + - set_fact: + mode: FIAttached + when: mode == 'Intersight' + - name: "Configure {{ profile_name }} Server Profile" + intersight_server_profile: + <<: *api_info + organization: "{{ organization | default(omit) }}" + name: "{{ profile_name }}" + target_platform: "{{ mode | default(omit) }}" + description: "Updated Profile for server name {{ inventory_hostname }}" + assigned_server: "{{ server_moid | default(omit) }}" + boot_order_policy: "{{ boot_order_policy | default(omit) }}" + imc_access_policy: "{{ imc_access_policy | default(omit) }}" + lan_connectivity_policy: "{{ lan_connectivity_policy | default(omit) }}" + local_user_policy: "{{ local_user_policy | default(omit) }}" + ntp_policy: "{{ ntp_policy | default(omit) }}" + virtual_media_policy: "{{ virtual_media_policy | default(omit) }}" + delegate_to: localhost diff --git a/ansible_collections/cisco/intersight/playbooks/intersight_server_profile_template.yml b/ansible_collections/cisco/intersight/playbooks/intersight_server_profile_template.yml new file mode 100644 index 00000000..b64d60d4 --- /dev/null +++ b/ansible_collections/cisco/intersight/playbooks/intersight_server_profile_template.yml @@ -0,0 +1,94 @@ +--- +# +# Configure Server Profile Templates +# +- hosts: localhost + connection: local + collections: + - cisco.intersight + gather_facts: false + vars: + # Create an anchor for api_info that can be used throughout the file + api_info: &api_info + # if api_key vars are omitted, INTERSIGHT_API_KEY_ID, INTERSIGHT_API_PRIVATE_KEY, + # and INTERSIGHT_API_URI environment variables used for API key data + api_private_key: "{{ api_private_key | default(omit) }}" + api_key_id: "{{ api_key_id | default(omit) }}" + api_uri: "{{ api_uri | default(omit) }}" + validate_certs: "{{ validate_certs | default(omit) }}" + state: "{{ state | default(omit) }}" + # Server Profile Template name + template_name: sp-devnet + org_name: dsoper-DevNet + imc_access_policy: access-devnet + ntp_policy: LabNTP + uuid_pool: uuid-devnet + num_profiles: 3 + tasks: + # Get the Organization Moid + - name: "Get {{ org_name }} Organization Moid" + intersight_rest_api: + <<: *api_info + resource_path: /organization/Organizations + query_params: + $filter: "Name eq '{{ org_name }}'" + register: org_resp + # Get the Access Policy + - name: "Get {{ imc_access_policy }} Access Policy Moid" + intersight_rest_api: + <<: *api_info + resource_path: /access/Policies + query_params: + $filter: "Name eq '{{ imc_access_policy }}'" + register: access_resp + # Get the NTP Policy + - name: "Get {{ 
ntp_policy }} NTP Policy Moid" + intersight_rest_api: + <<: *api_info + resource_path: /ntp/Policies + query_params: + $filter: "Name eq '{{ ntp_policy }}'" + register: ntp_resp + # Get the UUID Pool + - name: "Get {{ uuid_pool }} UUID Pool Moid" + intersight_rest_api: + <<: *api_info + resource_path: /uuidpool/Pools + query_params: + $filter: "Name eq '{{ uuid_pool }}'" + register: uuid_resp + # Config SP Template using Policy Buckets + - name: "Configure {{ template_name }} Server Profile Template" + intersight_rest_api: + <<: *api_info + resource_path: /server/ProfileTemplates + query_params: + $filter: "Name eq '{{ template_name }}'" + api_body: { + "Name": "{{ template_name }}", + "Organization": { + "Moid": "{{ org_resp.api_response.Moid }}" + }, + "PolicyBucket": [ + { + "Moid": "{{ ntp_resp.api_response.Moid }}", + "ObjectType": "ntp.Policy" + }, + { + "Moid": "{{ access_resp.api_response.Moid }}", + "ObjectType": "access.Policy" + } + ], + "Tags": [], + "TargetPlatform": "FIAttached", + "UuidAddressType": "POOL", + "UuidPool": { + "Moid": "{{ uuid_resp.api_response.Moid }}", + "ObjectType": "uuidpool.Pool" + } + } + register: template_resp + # Derive profiles from template (if profiles don't already exist) + - name: "Derive Profiles from {{ template_name}}" + include_tasks: derive_profiles.yml + loop: "{{ range(1, num_profiles+1) | list }}" diff --git a/ansible_collections/cisco/intersight/playbooks/intersight_virtual_media_policy.yml b/ansible_collections/cisco/intersight/playbooks/intersight_virtual_media_policy.yml new file mode 100644 index 00000000..7064a06d --- /dev/null +++ b/ansible_collections/cisco/intersight/playbooks/intersight_virtual_media_policy.yml @@ -0,0 +1,30 @@ +--- +# Example Playbook: cisco.intersight.intersight_virtual_media_policy +# Runs on localhost since policies are only configured once +- hosts: localhost + connection: local + collections: + - cisco.intersight + gather_facts: false + vars: + # Create an anchor for api_info that can be used throughout the file + api_info: &api_info + api_private_key: "{{ api_private_key }}" + api_key_id: "{{ api_key_id }}" + api_uri: "{{ api_uri | default(omit) }}" + tasks: + - name: Configure Virtual Media Policy + intersight_virtual_media_policy: + <<: *api_info + organization: DevNet + name: COS-VM + description: Virtual Media policy for lab use + tags: + - Key: Site + Value: RCDN + cdd_virtual_media: + mount_type: nfs + volume: nfs-cdd + remote_hostname: 172.28.224.77 + remote_path: /mnt/SHARE/ISOS/CENTOS + remote_file: CentOS7.iso diff --git a/ansible_collections/cisco/intersight/playbooks/only_new_server_profiles.yml b/ansible_collections/cisco/intersight/playbooks/only_new_server_profiles.yml new file mode 100644 index 00000000..e3b0d1d8 --- /dev/null +++ b/ansible_collections/cisco/intersight/playbooks/only_new_server_profiles.yml @@ -0,0 +1,72 @@ +--- +# +# Configure Server Profiles +# +# The hosts group used is provided by the group variable or defaulted to 'Intersight_Servers'. +# You can specify a specific host (or host group) on the command line: +# ansible-playbook ... -e group=<your host group> +# e.g., ansible-playbook server_profiles.yml -e group=TME_Demo +# +- hosts: "{{ group | default('Intersight_Servers') }}" + connection: local + collections: + - cisco.intersight + gather_facts: false + vars: + # Create an anchor for api_info that can be used throughout the file + api_info: &api_info + # Key can be directly specified, and vault should be used to encrypt: + # Ex. 
ansible-vault encrypt_string --vault-id tme@/Users/dsoper/Documents/vault_password_file '-----BEGIN EC PRIVATE KEY----- + # <your private key data> + # -----END EC PRIVATE KEY-----' + # To use with vault: + # ansible-playbook -i inventory --vault-id tme@vault_password_file intersight_server_profile.yml + api_private_key: "{{ api_private_key }}" + api_key_id: "{{ api_key_id }}" + api_uri: "{{ api_uri | default(omit) }}" + validate_certs: "{{ validate_certs | default(omit) }}" + state: "{{ state | default(omit) }}" + # Server Profile name default + profile_name: "{{ inventory_hostname | regex_replace('-r$', '') }}" + tasks: + # + # Configure profiles specific to server (run for each server in the inventory) + # + - set_fact: + mode: Standalone + when: mode is not defined or mode == 'IntersightStandalone' + - set_fact: + mode: FIAttached + when: mode == 'Intersight' + # Get server moid when not defined in inventory + - block: + - name: "Get {{ inventory_hostname }} Server Moid" + intersight_info: + <<: *api_info + server_names: "{{ inventory_hostname }}" + register: server + - set_fact: + server_moid: "{{ server.intersight_servers[0].Moid }}" + when: server_moid is not defined + delegate_to: localhost + - name: "Get current profile assignment" + intersight_rest_api: + <<: *api_info + resource_path: /server/Profiles + query_params: + $filter: "AssignedServer.Moid eq '{{ server_moid }}' or AssociatedServer.Moid eq '{{ server_moid }}'" + when: server_moid is defined + register: profile + delegate_to: localhost + - name: "Configure {{ profile_name }} Server Profile" + intersight_server_profile: + <<: *api_info + organization: "{{ organization | default(omit) }}" + name: "{{ profile_name }}" + target_platform: "{{ mode | default(omit) }}" + description: "Updated Profile for server name {{ inventory_hostname }}" + assigned_server: "{{ server_moid }}" + when: + - server_moid is defined + - profile is not defined or profile.api_response.Moid is not defined + delegate_to: localhost diff --git a/ansible_collections/cisco/intersight/playbooks/os_install.yml b/ansible_collections/cisco/intersight/playbooks/os_install.yml new file mode 100644 index 00000000..b151a98c --- /dev/null +++ b/ansible_collections/cisco/intersight/playbooks/os_install.yml @@ -0,0 +1,142 @@ +--- +# +# The hosts group used is provided by the group variable or defaulted to 'Intersight_Servers'. +# You can specify a specific host (or host group) on the command line: +# ansible-playbook ... -e group=<your host group> +# e.g., ansible-playbook server_profiles.yml -e group=TME_Demo +# +- hosts: "{{ group | default('Intersight_Servers') }}" + connection: local + collections: + - cisco.intersight + gather_facts: false + vars: + # Create an anchor for api_info that can be used throughout the file + api_info: &api_info + # if api_key vars are omitted, INTERSIGHT_API_KEY_ID, INTERSIGHT_API_PRIVATE_KEY, + # and INTERSIGHT_API_URI environment variables used for API key data + api_private_key: "{{ api_private_key | default(omit) }}" + api_key_id: "{{ api_key_id | default(omit) }}" + api_uri: "{{ api_uri | default(omit) }}" + validate_certs: "{{ validate_certs | default(omit) }}" + # OS and SCU Versions + os_version: ESXi 7.0 U3 + os_config: ESXi7.0ConfigFile + scu_version: 6.2.2a + org_name: default + # + # Example using vault: + # 1. Place the vault password into a plain text file (this is the password for vault access - do not check this into any repos!) + # $ cat vault_password_file + # ... + # 2. Encrypt a string (e.g., 'notagoodpassword'). 
You will later decrypt using your vault password file + # $ ansible-vault encrypt_string --vault-id tme@vault_password_file 'notagoodpassword' --name 'vault_password' + # (response is the encrypted string) + # 3. Place the vault variable in your playbook (example below): + # 4. Run the playbook and supply the vault password file (used to decrypt the vaulted password in the playbook) + # $ ansible-playbook -i inventory --vault-id tme@vault_password_file os_install.yml + # + vault_password: !vault | + $ANSIBLE_VAULT;1.2;AES256;tme + 36656264656638646566313633353832396138616264313032303433656636643638363864653936 + 6532646363303435633965383432633630306566323838640a363566376234303366313064306162 + 39326331373231643333616335393232353633393834653161633032383539383537656336666639 + 3635306535366233660a356235393664653538386136626439646137626531663135363636326131 + 3538 + tasks: + # Get the Organization Moid + - name: "Get {{ org_name }} Organization Moid" + intersight_rest_api: + <<: *api_info + resource_path: /organization/Organizations + query_params: + $filter: "Name eq '{{ org_name }}'" + register: org_resp + delegate_to: localhost + # Get the OS File Moid + - name: "Get {{ os_version }} OS File Moid" + intersight_rest_api: + <<: *api_info + resource_path: /softwarerepository/OperatingSystemFiles + query_params: + $filter: "Version eq '{{ os_version }}' and PermissionResources.Moid eq '{{ org_resp.api_response.Moid }}'" + register: os_resp + delegate_to: localhost + # Get the SCU File Moid + - name: "Get {{ scu_version }} SCU File Moid" + intersight_rest_api: + <<: *api_info + resource_path: /firmware/ServerConfigurationUtilityDistributables + query_params: + $filter: "Version eq '{{ scu_version }}' and PermissionResources.Moid eq '{{ org_resp.api_response.Moid }}'" + register: scu_resp + delegate_to: localhost + # Get the OS Config File Moid + - name: "Get {{ os_config }} OS Config File Moid" + intersight_rest_api: + <<: *api_info + resource_path: /os/ConfigurationFiles + query_params: + $filter: "Name eq '{{ os_config }}'" + register: os_config_resp + delegate_to: localhost + # Install OS + - name: Install OS + intersight_rest_api: + <<: *api_info + resource_path: /bulk/Requests + update_method: post + api_body: { + "Verb": "POST", + "Uri": "/v1/os/Installs", + "Requests": [ + { + "ObjectType": "bulk.RestSubRequest", + "Body": { + "InstallMethod": "vMedia", + "Image": { + "Moid": "{{ os_resp.api_response.Moid }}", + "ObjectType": "softwarerepository.OperatingSystemFile" + }, + "OsduImage": { + "ObjectType": "firmware.ServerConfigurationUtilityDistributable", + "Moid": "{{ scu_resp.api_response.Moid }}" + }, + "OverrideSecureBoot": true, + "Organization": { + "Moid": "{{ org_resp.api_response.Moid }}" + }, + "Answers": { + "Hostname": "sjc07-r14-1-1-6", + "IpConfigType": "DHCP", + "RootPassword": "{{ vault_password }}", + "IsRootPasswordCrypted": false, + "Source": "Template", + "IpConfiguration": { + "ObjectType": "os.Ipv4Configuration" + } + }, + "ConfigurationFile": { + "Moid": "{{ os_config_resp.api_response.Moid }}", + "ObjectType": "os.ConfigurationFile" + }, + "AdditionalParameters": null, + "InstallTarget": { + "ObjectType": "os.PhysicalDisk", + "Name": "Disk 1", + "StorageControllerSlotId": "1", + "SerialNumber": "99B0A05NFJXF" + }, + "Server": { + "ObjectType": "compute.{{ object_type }}", + "Moid": "{{ server_moid }}" + } + } + } + ], + "Organization": { + "Moid": "{{ org_resp.api_response.Moid }}" + } + } + delegate_to: localhost + register: install_resp diff --git 
a/ansible_collections/cisco/intersight/playbooks/ova_workflow.yml b/ansible_collections/cisco/intersight/playbooks/ova_workflow.yml new file mode 100644 index 00000000..3f4eea66 --- /dev/null +++ b/ansible_collections/cisco/intersight/playbooks/ova_workflow.yml @@ -0,0 +1,72 @@ +--- +# Execute Orchestration Workflow +- hosts: localhost + collections: + - cisco.intersight + gather_facts: false + vars: + # Create an anchor for api_info that can be used throughout the file + api_info: &api_info + api_private_key: "{{ api_private_key }}" + api_key_id: "{{ api_key_id }}" + image_url: "{{ image_url | default('http://172.28.224.62/UCSPE_4.0.4e.ova') }}" + vm_name: "{{ vm_name | default('ucspe-4-0-4e-orch') }}" + tasks: + - name: Get vCenter Moid + intersight_rest_api: + <<: *api_info + resource_path: /asset/DeviceRegistrations + query_params: + $filter: DeviceIpAddress eq '172.28.225.220' + register: vcenter + - name: Execute OVA deploy workflow + intersight_rest_api: + <<: *api_info + resource_path: /workflow/WorkflowInfos + update_method: post + api_body: { + "Name": "ucspe_vm", + "Organization": { + # "Selector": "Name eq 'default'", + # "ObjectType": "organization.Organization" + "Moid": "5dde9f116972652d33539d39" + }, + "Action": "Start", + "Input": { + "Vcenter": { + "Moid": "{{ vcenter.api_response.Moid }}", + "ObjectType":"asset.DeviceRegistration" + }, + "Datastore": "Atlanta Data", + "Image": "{{ image_url }}", + "VmName": "{{ vm_name }}", + "PowerOn": false, + "Datacenter": "SJC07", + "Cluster": "Atlanta" + }, + "WorkflowDefinition": { + "Selector": "Name eq 'ucspe_vm'", + "ObjectType":"workflow.WorkflowDefinition" + }, + "WorkflowCtx": { + "InitiatorCtx": { + "InitiatorName":"ucspe_vm", + "InitiatorType":"workflow.WorkflowDefinition" + } + } + } + register: workflow + - name: Get status of OVA deploy workflow + intersight_rest_api: + <<: *api_info + resource_path: /workflow/WorkflowInfos + query_params: + $expand: ParentTaskInfo($select=WorkflowInfo;$expand=WorkflowInfo($select=WorkflowDefinition)) + $filter: "Moid eq '{{ workflow.api_response.Moid }}'" + register: status + until: status.api_response.Status != 'RUNNING' and status.api_response.Status != 'WAITING' + retries: 10 + delay: 60 + ignore_errors: true + - debug: + msg: "Final workflow status: {{ status.api_response.Status }}" diff --git a/ansible_collections/cisco/intersight/playbooks/profile_inventory b/ansible_collections/cisco/intersight/playbooks/profile_inventory new file mode 100644 index 00000000..9e6e25dc --- /dev/null +++ b/ansible_collections/cisco/intersight/playbooks/profile_inventory @@ -0,0 +1,4 @@ +# example group to create profiles with no server assignment +[Intersight_Servers] +demo1 mode=Intersight +demo2 mode=Intersight diff --git a/ansible_collections/cisco/intersight/playbooks/profile_with_buckets.yml b/ansible_collections/cisco/intersight/playbooks/profile_with_buckets.yml new file mode 100644 index 00000000..4fde5991 --- /dev/null +++ b/ansible_collections/cisco/intersight/playbooks/profile_with_buckets.yml @@ -0,0 +1,28 @@ +--- +# Server profile config using policy buckets +- hosts: localhost + gather_facts: false + vars: + profile_name: SP-SJC07-R14-FI-1-1-6 + tasks: + - name: "Get {{ profile_name }}" + cisco.intersight.intersight_rest_api: + resource_path: /server/Profiles + query_params: + $filter: "Name eq '{{ profile_name }}'" + register: results + - debug: + msg: "{{ results.api_response.PolicyBucket | selectattr('ObjectType', 'eq', 'access.Policy') }}" + - name: "Config {{ profile_name }}" + 
cisco.intersight.intersight_rest_api: + resource_path: "/server/Profiles/{{ results.api_response.Moid }}/PolicyBucket" + # should be moid for tf-k8s-SJC07-R14-15-access + list_body: + [ + { + "Moid": "60a6e26f6275722d31f8e278", + "ObjectType": "access.Policy", + }, + ] + update_method: post + # selectattr yields a generator (always truthy), so materialize it with list before negating + when: not (results.api_response.PolicyBucket | selectattr('ObjectType', 'eq', 'access.Policy') | selectattr('Moid', 'eq', '60a6e26f6275722d31f8e278') | list) diff --git a/ansible_collections/cisco/intersight/playbooks/roles/policies/hyperflex_policies/auto_support/tasks/main.yml b/ansible_collections/cisco/intersight/playbooks/roles/policies/hyperflex_policies/auto_support/tasks/main.yml new file mode 100644 index 00000000..f25e8857 --- /dev/null +++ b/ansible_collections/cisco/intersight/playbooks/roles/policies/hyperflex_policies/auto_support/tasks/main.yml @@ -0,0 +1,31 @@ +--- +- name: "Configure Auto Support Policy" + vars: + # Create an anchor for api_info that can be used throughout the file + api_info: &api_info + api_private_key: "{{ api_private_key }}" + api_key_id: "{{ api_key_id }}" + api_uri: "{{ api_uri | default(omit) }}" + validate_certs: "{{ validate_certs | default(omit) }}" + state: "{{ state | default(omit) }}" + intersight_rest_api: + <<: *api_info + resource_path: /hyperflex/AutoSupportPolicies + query_params: + $filter: "Name eq '{{ hx_auto_support_policy }}'" + api_body: { + "Organization": { + "Moid": "{{ intersight_org.api_response.Moid }}" + }, + "Name": "{{ hx_auto_support_policy }}", + "AdminState":"{{ hx_auto_support_enable }}", + "ServiceTicketReceipient":"{{ hx_auto_support_receipient }}", + "ClusterProfiles": [ + { + "Moid": "{{ cluster_profile.api_response.Moid }}" + } + ] + } + register: auto_support_policy + +- debug: msg="HyperFlex Auto Support Policy named {{ hx_auto_support_policy }} has been created successfully."
\ No newline at end of file diff --git a/ansible_collections/cisco/intersight/playbooks/roles/policies/hyperflex_policies/cluster_network/tasks/main.yml b/ansible_collections/cisco/intersight/playbooks/roles/policies/hyperflex_policies/cluster_network/tasks/main.yml new file mode 100644 index 00000000..8209117f --- /dev/null +++ b/ansible_collections/cisco/intersight/playbooks/roles/policies/hyperflex_policies/cluster_network/tasks/main.yml @@ -0,0 +1,50 @@ +--- +- name: "Configure Cluster Network Policy" + vars: + # Create an anchor for api_info that can be used throughout the file + api_info: &api_info + api_private_key: "{{ api_private_key }}" + api_key_id: "{{ api_key_id }}" + api_uri: "{{ api_uri | default(omit) }}" + validate_certs: "{{ validate_certs | default(omit) }}" + state: "{{ state | default(omit) }}" + intersight_rest_api: + <<: *api_info + resource_path: /hyperflex/ClusterNetworkPolicies + query_params: + $filter: "Name eq '{{ hx_cluster_network_policy }}'" + api_body: { + "Organization": { + "Moid": "{{ intersight_org.api_response.Moid }}" + }, + "Name":"{{ hx_cluster_network_policy }}", + "JumboFrame":"{{ hx_jumbo_frames }}", + "KvmIpRange":{ + "StartAddr":"{{ ucs_kvm_start_ip }}", + "EndAddr":"{{ ucs_kvm_end_ip }}", + "Gateway":"{{ ucs_kvm_gateway }}", + "Netmask":"{{ ucs_kvm_netmask }}" + }, + "MacPrefixRange":{ + "StartAddr":"{{ hx_mac_start }}", + "EndAddr":"{{ hx_mac_end }}" + }, + "MgmtVlan":{ + "Name":"{{ hx_mgmt_vlan_name }}", + "VlanId":"{{ hx_mgmt_vlan_id }}" + }, + "VmMigrationVlan":{ + "Name":"{{ hx_migration_vlan_name }}", + "VlanId":"{{ hx_migration_vlan_id }}" + }, + "VmNetworkVlans":"{{ hx_guest_vm_vlans }}", + "UplinkSpeed": "default", + "ClusterProfiles": [ + { + "Moid": "{{ cluster_profile.api_response.Moid }}" + } + ] + } + register: cluster_network + +- debug: msg="HyperFlex Cluster Network Policy named {{ hx_cluster_network_policy }} has been created successfully." 
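The cluster network role above takes its addressing and VLAN inputs entirely from variables. A minimal illustrative vars sketch follows; the names come from the tasks above, but every value (and the list shape assumed for hx_guest_vm_vlans) is a placeholder, not a collection default:

# Illustrative inputs for the cluster_network role (placeholder values)
hx_cluster_network_policy: hx-cluster-network
hx_jumbo_frames: true
ucs_kvm_start_ip: 192.168.10.10
ucs_kvm_end_ip: 192.168.10.20
ucs_kvm_gateway: 192.168.10.1
ucs_kvm_netmask: 255.255.255.0
hx_mac_start: "00:25:B5:A0:01:01"
hx_mac_end: "00:25:B5:A0:01:FF"
hx_mgmt_vlan_name: hx-mgmt
hx_mgmt_vlan_id: 110
hx_migration_vlan_name: hx-vmotion
hx_migration_vlan_id: 120
hx_guest_vm_vlans:
  - Name: guest-vm-1
    VlanId: 130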
diff --git a/ansible_collections/cisco/intersight/playbooks/roles/policies/hyperflex_policies/cluster_profile/defaults/main.yml b/ansible_collections/cisco/intersight/playbooks/roles/policies/hyperflex_policies/cluster_profile/defaults/main.yml new file mode 100644 index 00000000..a7a0ca66 --- /dev/null +++ b/ansible_collections/cisco/intersight/playbooks/roles/policies/hyperflex_policies/cluster_profile/defaults/main.yml @@ -0,0 +1,8 @@ +--- +# Default variable values for HyperFlex Cluster Profiles +hx_mgmt_platform: FI +hx_hypervisor_type: ESXi +hx_replication_factor: 3 +hx_vdi_optimization: false +hx_disk_cleanup: false +hx_laz_autoconfig: false diff --git a/ansible_collections/cisco/intersight/playbooks/roles/policies/hyperflex_policies/cluster_profile/tasks/main.yml b/ansible_collections/cisco/intersight/playbooks/roles/policies/hyperflex_policies/cluster_profile/tasks/main.yml new file mode 100644 index 00000000..877f41f1 --- /dev/null +++ b/ansible_collections/cisco/intersight/playbooks/roles/policies/hyperflex_policies/cluster_profile/tasks/main.yml @@ -0,0 +1,33 @@ +--- +- name: "Configure Cluster Profile" + vars: + # Create an anchor for api_info that can be used throughout the file + api_info: &api_info + api_private_key: "{{ api_private_key }}" + api_key_id: "{{ api_key_id }}" + api_uri: "{{ api_uri | default(omit) }}" + validate_certs: "{{ validate_certs | default(omit) }}" + state: "{{ state | default(omit) }}" + intersight_rest_api: + <<: *api_info + resource_path: /hyperflex/ClusterProfiles + query_params: + $filter: "Name eq '{{ hx_cluster_profile }}'" + api_body: { + "Organization": { + "Moid": "{{ intersight_org.api_response.Moid }}" + }, + "Name":"{{ hx_cluster_profile }}", + "MgmtPlatform":"{{ hx_mgmt_platform }}", + "HypervisorType":"{{ hx_hypervisor_type }}", + "MgmtIpAddress":"{{ hx_mgmt_ip }}", + "MacAddressPrefix":"{{ hx_mgmt_mac_prefix }}", + "Replication":"{{ hx_replication_factor }}", + "StorageDataVlan":{ + "Name":"{{ hx_data_vlan_name }}", + "VlanId":"{{ hx_data_vlan_id }}" + } + } + register: cluster_profile + +- debug: msg="HyperFlex Cluster Profile named {{ hx_cluster_profile }} has been created successfully." 
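Because each role registers its result as a play-scoped variable (intersight_org, cluster_profile), the later policy roles can reference cluster_profile.api_response.Moid directly, so role ordering matters. A hypothetical wrapper playbook (a sketch, not part of the collection) showing that ordering:

- hosts: localhost
  connection: local
  gather_facts: false
  tasks:
    # Must run first: registers intersight_org for every later role
    - import_role:
        name: policies/hyperflex_policies/intersight_org
    # Registers cluster_profile, which the policy roles attach to
    - import_role:
        name: policies/hyperflex_policies/cluster_profile
    # Any policy role after this point can use both registered results
    - import_role:
        name: policies/hyperflex_policies/cluster_network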
diff --git a/ansible_collections/cisco/intersight/playbooks/roles/policies/hyperflex_policies/cluster_storage/tasks/main.yml b/ansible_collections/cisco/intersight/playbooks/roles/policies/hyperflex_policies/cluster_storage/tasks/main.yml new file mode 100644 index 00000000..efd14093 --- /dev/null +++ b/ansible_collections/cisco/intersight/playbooks/roles/policies/hyperflex_policies/cluster_storage/tasks/main.yml @@ -0,0 +1,34 @@ +--- +- name: "Configure Cluster Storage Policy" + vars: + # Create an anchor for api_info that can be used throughout the file + api_info: &api_info + api_private_key: "{{ api_private_key }}" + api_key_id: "{{ api_key_id }}" + api_uri: "{{ api_uri | default(omit) }}" + validate_certs: "{{ validate_certs | default(omit) }}" + state: "{{ state | default(omit) }}" + intersight_rest_api: + <<: *api_info + resource_path: /hyperflex/ClusterStoragePolicies + query_params: + $filter: "Name eq '{{ hx_cluster_storage_policy }}'" + api_body: { + "Organization": { + "Moid": "{{ intersight_org.api_response.Moid }}" + }, + "Name":"{{ hx_cluster_storage_policy }}", + "VdiOptimization":"{{ hx_vdi_optimization }}", + "DiskPartitionCleanup":"{{ hx_disk_cleanup }}", + "LogicalAvalabilityZoneConfig":{ + "AutoConfig":"{{ hx_laz_autoconfig }}" + }, + "ClusterProfiles": [ + { + "Moid": "{{ cluster_profile.api_response.Moid }}" + } + ] + } + register: storage_setting + +- debug: msg="HyperFlex Cluster Storage Policy named {{ hx_cluster_storage_policy }} has been created successfully." diff --git a/ansible_collections/cisco/intersight/playbooks/roles/policies/hyperflex_policies/deploy/tasks/main.yml b/ansible_collections/cisco/intersight/playbooks/roles/policies/hyperflex_policies/deploy/tasks/main.yml new file mode 100644 index 00000000..13b598b6 --- /dev/null +++ b/ansible_collections/cisco/intersight/playbooks/roles/policies/hyperflex_policies/deploy/tasks/main.yml @@ -0,0 +1,43 @@ +--- +# Get cluster profile +- name: Get Cluster Profile + vars: + api_info: &api_info + api_private_key: "{{ api_private_key }}" + api_key_id: "{{ api_key_id }}" + api_uri: "{{ api_uri | default(omit) }}" + validate_certs: "{{ validate_certs | default(omit) }}" + state: "{{ state | default(omit) }}" + intersight_rest_api: + <<: *api_info + resource_path: /hyperflex/ClusterProfiles + query_params: + $filter: "Name eq '{{ hx_cluster_name }}'" + register: profile +# Prompt for cluster deployment action
- name: "Prompt for deployment action" + pause: + prompt: "Set the deployment action. Valid choices are Validate, Deploy, Continue or Retry." + echo: yes + register: hx_action + run_once: true +# Set cluster deployment action +- name: Set Cluster Action + intersight_rest_api: + <<: *api_info + resource_path: /hyperflex/ClusterProfiles + query_params: + $filter: "Name eq '{{ hx_cluster_name }}'" + api_body: { + "Action": "{{ hx_action.user_input }}" + } + when: + - profile.api_response.ConfigContext.ConfigState != 'Configuring' + - profile.api_response.ConfigContext.ConfigState != 'Associated' +# Can optionally wait for subsequent tasks if needed +# register: result +# until: result.api_response.ConfigContext.ConfigState == 'Associated' +# retries: 20 +# delay: 30 +- debug: msg="HyperFlex Cluster Profile deployment action has been triggered successfully." 
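The commented-out lines above sketch a completion wait. Written out, and using the CamelCase attribute path the file's when conditions already use, a hedged version that could be appended to the same tasks file (the api_info anchor only resolves there) is:

- name: Wait for the deployment action to complete
  intersight_rest_api:
    <<: *api_info
    resource_path: /hyperflex/ClusterProfiles
    query_params:
      $filter: "Name eq '{{ hx_cluster_name }}'"
  register: result
  until: result.api_response.ConfigContext.ConfigState == 'Associated'
  retries: 20
  delay: 30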
diff --git a/ansible_collections/cisco/intersight/playbooks/roles/policies/hyperflex_policies/edge_cluster_network/tasks/main.yml b/ansible_collections/cisco/intersight/playbooks/roles/policies/hyperflex_policies/edge_cluster_network/tasks/main.yml new file mode 100644 index 00000000..7dcc57a5 --- /dev/null +++ b/ansible_collections/cisco/intersight/playbooks/roles/policies/hyperflex_policies/edge_cluster_network/tasks/main.yml @@ -0,0 +1,38 @@ +--- +- name: "Configure Cluster Network Policy" + vars: + # Create an anchor for api_info that can be used throughout the file + api_info: &api_info + api_private_key: "{{ api_private_key }}" + api_key_id: "{{ api_key_id }}" + api_uri: "{{ api_uri | default(omit) }}" + validate_certs: "{{ validate_certs | default(omit) }}" + state: "{{ state | default(omit) }}" + intersight_rest_api: + <<: *api_info + resource_path: /hyperflex/ClusterNetworkPolicies + query_params: + $filter: "Name eq '{{ hx_cluster_network_policy }}'" + api_body: { + "Organization": { + "Moid": "{{ intersight_org.api_response.Moid }}" + }, + "Name":"{{ hx_cluster_network_policy }}", + "JumboFrame":"{{ hx_jumbo_frames }}", + "MacPrefixRange":{ + "StartAddr":"{{ hx_mac_start }}", + "EndAddr":"{{ hx_mac_end }}" + }, + "MgmtVlan":{ + "VlanId":"{{ hx_mgmt_vlan_id }}" + }, + "UplinkSpeed":"{{ hx_uplink_speed }}", + "ClusterProfiles": [ + { + "Moid": "{{ cluster_profile.api_response.Moid }}" + } + ] + } + register: cluster_network + +- debug: msg="HyperFlex Cluster Network Policy named {{ hx_cluster_network_policy }} has been created successfully." diff --git a/ansible_collections/cisco/intersight/playbooks/roles/policies/hyperflex_policies/edge_cluster_profile/defaults/main.yml b/ansible_collections/cisco/intersight/playbooks/roles/policies/hyperflex_policies/edge_cluster_profile/defaults/main.yml new file mode 100644 index 00000000..7ace5ad5 --- /dev/null +++ b/ansible_collections/cisco/intersight/playbooks/roles/policies/hyperflex_policies/edge_cluster_profile/defaults/main.yml @@ -0,0 +1,6 @@ +--- +# Default variable values for HyperFlex Cluster Profiles +hx_mgmt_platform: EDGE +hx_hypervisor_type: ESXi +hx_vdi_optimization: false +hx_disk_cleanup: false diff --git a/ansible_collections/cisco/intersight/playbooks/roles/policies/hyperflex_policies/edge_cluster_profile/tasks/main.yml b/ansible_collections/cisco/intersight/playbooks/roles/policies/hyperflex_policies/edge_cluster_profile/tasks/main.yml new file mode 100644 index 00000000..6beef6e5 --- /dev/null +++ b/ansible_collections/cisco/intersight/playbooks/roles/policies/hyperflex_policies/edge_cluster_profile/tasks/main.yml @@ -0,0 +1,31 @@ +--- +- name: "Configure Cluster Profile" + vars: + # Create an anchor for api_info that can be used throughout the file + api_info: &api_info + api_private_key: "{{ api_private_key }}" + api_key_id: "{{ api_key_id }}" + api_uri: "{{ api_uri | default(omit) }}" + validate_certs: "{{ validate_certs | default(omit) }}" + state: "{{ state | default(omit) }}" + intersight_rest_api: + <<: *api_info + resource_path: /hyperflex/ClusterProfiles + query_params: + $filter: "Name eq '{{ hx_cluster_profile }}'" + api_body: { + "Organization": { + "Moid": "{{ intersight_org.api_response.Moid }}" + }, + "Name":"{{ hx_cluster_profile }}", + "MgmtPlatform":"{{ hx_mgmt_platform }}", + "HypervisorType":"{{ hx_hypervisor_type }}", + "MgmtIpAddress":"{{ hx_mgmt_ip }}", + "MacAddressPrefix":"{{ hx_mgmt_mac_prefix }}", + "StorageDataVlan":{ + "VlanId":"{{ hx_data_vlan_id }}" + } + } + register: cluster_profile + +- 
debug: msg="HyperFlex Cluster Profile named {{ hx_cluster_profile }} has been created successfully." diff --git a/ansible_collections/cisco/intersight/playbooks/roles/policies/hyperflex_policies/edge_cluster_storage/tasks/main.yml b/ansible_collections/cisco/intersight/playbooks/roles/policies/hyperflex_policies/edge_cluster_storage/tasks/main.yml new file mode 100644 index 00000000..ee984928 --- /dev/null +++ b/ansible_collections/cisco/intersight/playbooks/roles/policies/hyperflex_policies/edge_cluster_storage/tasks/main.yml @@ -0,0 +1,31 @@ +--- +- name: "Configure Cluster Storage Policy" + vars: + # Create an anchor for api_info that can be used throughout the file + api_info: &api_info + api_private_key: "{{ api_private_key }}" + api_key_id: "{{ api_key_id }}" + api_uri: "{{ api_uri | default(omit) }}" + validate_certs: "{{ validate_certs | default(omit) }}" + state: "{{ state | default(omit) }}" + intersight_rest_api: + <<: *api_info + resource_path: /hyperflex/ClusterStoragePolicies + query_params: + $filter: "Name eq '{{ hx_cluster_storage_policy }}'" + api_body: { + "Organization": { + "Moid": "{{ intersight_org.api_response.Moid }}" + }, + "Name":"{{ hx_cluster_storage_policy }}", + "VdiOptimization":"{{ hx_vdi_optimization }}", + "DiskPartitionCleanup":"{{ hx_disk_cleanup }}", + "ClusterProfiles": [ + { + "Moid": "{{ cluster_profile.api_response.Moid }}" + } + ] + } + register: storage_setting + +- debug: msg="HyperFlex Cluster Storage Policy named {{ hx_cluster_storage_policy }} has been created successfully." diff --git a/ansible_collections/cisco/intersight/playbooks/roles/policies/hyperflex_policies/edge_software_version/tasks/main.yml b/ansible_collections/cisco/intersight/playbooks/roles/policies/hyperflex_policies/edge_software_version/tasks/main.yml new file mode 100644 index 00000000..32db3b01 --- /dev/null +++ b/ansible_collections/cisco/intersight/playbooks/roles/policies/hyperflex_policies/edge_software_version/tasks/main.yml @@ -0,0 +1,30 @@ +--- +- name: "Configure Software Version Policy" + vars: + # Create an anchor for api_info that can be used throughout the file + api_info: &api_info + api_private_key: "{{ api_private_key }}" + api_key_id: "{{ api_key_id }}" + api_uri: "{{ api_uri | default(omit) }}" + validate_certs: "{{ validate_certs | default(omit) }}" + state: "{{ state | default(omit) }}" + intersight_rest_api: + <<: *api_info + resource_path: /hyperflex/SoftwareVersionPolicies + query_params: + $filter: "Name eq '{{ hx_software_policy }}'" + api_body: { + "Organization": { + "Moid": "{{ intersight_org.api_response.Moid }}" + }, + "Name":"{{ hx_software_policy }}", + "HxdpVersion":"{{ hxdp_version }}", + "ClusterProfiles": [ + { + "Moid": "{{ cluster_profile.api_response.Moid }}" + } + ] + } + register: software_policy + +- debug: msg="HyperFlex Software Version Policy named {{ hx_software_policy }} has been created successfully." 
diff --git a/ansible_collections/cisco/intersight/playbooks/roles/policies/hyperflex_policies/fc/tasks/main.yml b/ansible_collections/cisco/intersight/playbooks/roles/policies/hyperflex_policies/fc/tasks/main.yml new file mode 100644 index 00000000..073cf38e --- /dev/null +++ b/ansible_collections/cisco/intersight/playbooks/roles/policies/hyperflex_policies/fc/tasks/main.yml @@ -0,0 +1,52 @@ +--- +- name: "Configure External FC Storage Policy" + vars: + # Create an anchor for api_info that can be used throughout the file + api_info: &api_info + api_private_key: "{{ api_private_key }}" + api_key_id: "{{ api_key_id }}" + api_uri: "{{ api_uri | default(omit) }}" + validate_certs: "{{ validate_certs | default(omit) }}" + state: "{{ state | default(omit) }}" + intersight_rest_api: + <<: *api_info + resource_path: /hyperflex/ExtFcStoragePolicies + query_params: + $filter: "Name eq '{{ hx_fc_setting_policy }}'" + api_body: { + "Organization": { + "Moid": "{{ intersight_org.api_response.Moid }}" + }, + "AdminState":"{{ hx_fc_setting_enable }}", + "Name":"{{ hx_fc_setting_policy }}", + "ExtaTraffic":{ + "Name":"{{ hx_vsan_a_name }}", + "VsanId":"{{ hx_vsan_a_id }}" + }, + "ExtbTraffic":{ + "Name":"{{ hx_vsan_b_name }}", + "VsanId":"{{ hx_vsan_b_id }}" + }, + "WwxnPrefixRange":{ + "StartAddr":"{{ hx_fc_wwxn_range_start }}", + "EndAddr":"{{ hx_fc_wwxn_range_end }}" + }, + "ClusterProfiles": [ + { + "Moid": "{{ cluster_profile.api_response.Moid }}" + } + ] + } + register: fc_settings +# Set WWXN prefix for the cluster profile when additional FC HBAs are configured +- name: "Perform Action on {{ hx_profile_name }} Profile" + intersight_rest_api: + <<: *api_info + resource_path: /hyperflex/ClusterProfiles + query_params: + $filter: "Name eq '{{ hx_cluster_name }}'" + api_body: { + "WwxnPrefix": "{{ hx_fc_wwxn_range_start }}" + } + +- debug: msg="HyperFlex External FC Storage Policy named {{ hx_fc_setting_policy }} has been created successfully." 
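The FC role above needs VSAN and WWxN range inputs; note that its second task filters on hx_cluster_name even though the task name interpolates hx_profile_name, so both should be set. An illustrative vars sketch (placeholder values):

# Illustrative inputs for the fc role (placeholder values)
hx_cluster_name: hx-cluster
hx_fc_setting_policy: hx-ext-fc
hx_fc_setting_enable: true
hx_vsan_a_name: vsan-a
hx_vsan_a_id: 101
hx_vsan_b_name: vsan-b
hx_vsan_b_id: 102
hx_fc_wwxn_range_start: "20:00:00:25:B5:A0:00:01"
hx_fc_wwxn_range_end: "20:00:00:25:B5:A0:00:FF"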
diff --git a/ansible_collections/cisco/intersight/playbooks/roles/policies/hyperflex_policies/intersight_org/tasks/main.yml b/ansible_collections/cisco/intersight/playbooks/roles/policies/hyperflex_policies/intersight_org/tasks/main.yml new file mode 100644 index 00000000..fcef95a8 --- /dev/null +++ b/ansible_collections/cisco/intersight/playbooks/roles/policies/hyperflex_policies/intersight_org/tasks/main.yml @@ -0,0 +1,17 @@ +--- +- name: "Retrieve Intersight Org" + vars: + # Create an anchor for api_info that can be used throughout the file + api_info: &api_info + api_private_key: "{{ api_private_key }}" + api_key_id: "{{ api_key_id }}" + api_uri: "{{ api_uri | default(omit) }}" + validate_certs: "{{ validate_certs | default(omit) }}" + state: "{{ state | default(omit) }}" + intersight_rest_api: + <<: *api_info + resource_path: /organization/Organizations + query_params: + $filter: "Name eq '{{ org_name }}'" + + register: intersight_org diff --git a/ansible_collections/cisco/intersight/playbooks/roles/policies/hyperflex_policies/iscsi/tasks/main.yml b/ansible_collections/cisco/intersight/playbooks/roles/policies/hyperflex_policies/iscsi/tasks/main.yml new file mode 100644 index 00000000..45b7548d --- /dev/null +++ b/ansible_collections/cisco/intersight/playbooks/roles/policies/hyperflex_policies/iscsi/tasks/main.yml @@ -0,0 +1,38 @@ +--- +- name: "Configure External iSCSI Storage Policy" + vars: + # Create an anchor for api_info that can be used throughout the file + api_info: &api_info + api_private_key: "{{ api_private_key }}" + api_key_id: "{{ api_key_id }}" + api_uri: "{{ api_uri | default(omit) }}" + validate_certs: "{{ validate_certs | default(omit) }}" + state: "{{ state | default(omit) }}" + intersight_rest_api: + <<: *api_info + resource_path: /hyperflex/ExtIscsiStoragePolicies + query_params: + $filter: "Name eq '{{ hx_iscsi_setting_policy }}'" + api_body: { + "Organization": { + "Moid": "{{ intersight_org.api_response.Moid }}" + }, + "AdminState":"{{ hx_iscsi_setting_enable }}", + "Name":"{{ hx_iscsi_setting_policy }}", + "ExtaTraffic":{ + "Name":"{{ hx_iscsi_vlan_a_name }}", + "VlanId":"{{ hx_iscsi_vlan_a_id }}" + }, + "ExtbTraffic":{ + "Name":"{{ hx_iscsi_vlan_b_name }}", + "VlanId":"{{ hx_iscsi_vlan_b_id }}" + }, + "ClusterProfiles": [ + { + "Moid": "{{ cluster_profile.api_response.Moid }}" + } + ] + } + register: iscsi_settings + +- debug: msg="HyperFlex External iSCSI Storage Policy named {{ hx_iscsi_setting_policy }} has been created successfully." 
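The intersight_org lookup above registers its result but never checks that the filter matched anything; a missing organization only surfaces later as an undefined Moid in some policy body. A hedged guard task that could follow the lookup:

- name: Fail early if the Organization was not found
  fail:
    msg: "Organization {{ org_name }} was not found in Intersight"
  when: intersight_org.api_response.Moid is not defined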
diff --git a/ansible_collections/cisco/intersight/playbooks/roles/policies/hyperflex_policies/local_credential/tasks/main.yml b/ansible_collections/cisco/intersight/playbooks/roles/policies/hyperflex_policies/local_credential/tasks/main.yml new file mode 100644 index 00000000..901dac0f --- /dev/null +++ b/ansible_collections/cisco/intersight/playbooks/roles/policies/hyperflex_policies/local_credential/tasks/main.yml @@ -0,0 +1,33 @@ +--- +- name: "Configure Local Credential Policy" + vars: + # Create an anchor for api_info that can be used throughout the file + api_info: &api_info + api_private_key: "{{ api_private_key }}" + api_key_id: "{{ api_key_id }}" + api_uri: "{{ api_uri | default(omit) }}" + validate_certs: "{{ validate_certs | default(omit) }}" + state: "{{ state | default(omit) }}" + intersight_rest_api: + <<: *api_info + resource_path: /hyperflex/LocalCredentialPolicies + query_params: + $filter: "Name eq '{{ hx_local_credential_policy }}'" + api_body: { + "Organization": { + "Moid": "{{ intersight_org.api_response.Moid }}" + }, + "Name": "{{ hx_local_credential_policy }}", + "HypervisorAdmin":"{{ hx_hypervisor_admin }}", + "FactoryHypervisorPassword":"{{ hx_hypervisor_factory_password }}", + "HypervisorAdminPwd":"{{ hx_hypervisor_password | default(omit) }}", + "HxdpRootPwd":"{{ hx_dp_root_password | default(omit) }}", + "ClusterProfiles": [ + { + "Moid": "{{ cluster_profile.api_response.Moid }}" + } + ] + } + register: local_credential + +- debug: msg="HyperFlex Local Credential Policy named {{ hx_local_credential_policy }} has been created successfully." diff --git a/ansible_collections/cisco/intersight/playbooks/roles/policies/hyperflex_policies/node_config/tasks/main.yml b/ansible_collections/cisco/intersight/playbooks/roles/policies/hyperflex_policies/node_config/tasks/main.yml new file mode 100644 index 00000000..5910f951 --- /dev/null +++ b/ansible_collections/cisco/intersight/playbooks/roles/policies/hyperflex_policies/node_config/tasks/main.yml @@ -0,0 +1,42 @@ +--- +- name: "Configure Node Configuration Policy" + vars: + # Create an anchor for api_info that can be used throughout the file + api_info: &api_info + api_private_key: "{{ api_private_key }}" + api_key_id: "{{ api_key_id }}" + api_uri: "{{ api_uri | default(omit) }}" + validate_certs: "{{ validate_certs | default(omit) }}" + state: "{{ state | default(omit) }}" + intersight_rest_api: + <<: *api_info + resource_path: /hyperflex/NodeConfigPolicies + query_params: + $filter: "Name eq '{{ hx_node_config_policy }}'" + api_body: { + "Organization": { + "Moid": "{{ intersight_org.api_response.Moid }}" + }, + "Name":"{{ hx_node_config_policy }}", + "NodeNamePrefix":"{{ hx_node_profile_prefix }}", + "MgmtIpRange":{ + "StartAddr":"{{ esx_mgmt_ip_start }}", + "EndAddr":"{{ esx_mgmt_ip_end }}", + "Netmask":"{{ hx_mgmt_netmask }}", + "Gateway":"{{ hx_mgmt_gateway }}" + }, + "HxdpIpRange":{ + "StartAddr":"{{ hx_mgmt_vm_ip_start }}", + "EndAddr":"{{ hx_mgmt_vm_ip_end }}", + "Netmask":"{{ hx_mgmt_netmask }}", + "Gateway":"{{ hx_mgmt_gateway }}" + }, + "ClusterProfiles": [ + { + "Moid": "{{ cluster_profile.api_response.Moid }}" + } + ] + } + register: node_config + +- debug: msg="HyperFlex Node Configuration Policy named {{ hx_node_config_policy }} has been created successfully." 
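An illustrative vars sketch for the local_credential and node_config roles above (names from the tasks; values, including the boolean assumed for the factory password flag, are placeholders):

# Illustrative inputs for the local_credential and node_config roles
hx_local_credential_policy: hx-credentials
hx_hypervisor_admin: root
hx_hypervisor_factory_password: true
hx_node_config_policy: hx-node-config
hx_node_profile_prefix: hx-node
esx_mgmt_ip_start: 192.168.30.11
esx_mgmt_ip_end: 192.168.30.14
hx_mgmt_vm_ip_start: 192.168.30.21
hx_mgmt_vm_ip_end: 192.168.30.24
hx_mgmt_netmask: 255.255.255.0
hx_mgmt_gateway: 192.168.30.1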
diff --git a/ansible_collections/cisco/intersight/playbooks/roles/policies/hyperflex_policies/node_profiles/tasks/main.yml b/ansible_collections/cisco/intersight/playbooks/roles/policies/hyperflex_policies/node_profiles/tasks/main.yml new file mode 100644 index 00000000..4c7ae35c --- /dev/null +++ b/ansible_collections/cisco/intersight/playbooks/roles/policies/hyperflex_policies/node_profiles/tasks/main.yml @@ -0,0 +1,49 @@ +--- +# Get server Moids +- name: Get server Moid + vars: + # Create an anchor for api_info that can be used throughout the file + api_info: &api_info + api_private_key: "{{ api_private_key }}" + api_key_id: "{{ api_key_id }}" + api_uri: "{{ api_uri | default(omit) }}" + validate_certs: "{{ validate_certs | default(omit) }}" + state: "{{ state | default(omit) }}" + cisco.intersight.intersight_info: + <<: *api_info + server_names: + - "{{ item }}" + loop: "{{ hx_servers }}" + register: inventory +# Get Cluster Profile Attributes +- name: "Get HyperFlex Cluster Profile" + intersight_rest_api: + <<: *api_info + resource_path: /hyperflex/ClusterProfiles + query_params: + $filter: "Name eq '{{ hx_cluster_name }}'" + register: profile +# Assign servers and profile to node profile +- name: "Configure Node Profile" + intersight_rest_api: + <<: *api_info + resource_path: /hyperflex/NodeProfiles + query_params: + $filter: "Name eq '{{ hx_node_profile_prefix }}-{{ '%02d' % (idx + 1) }}'" + api_body: { + "Name":"{{ hx_node_profile_prefix }}-{{ '%02d' % (idx + 1) }}", + "AssignedServer": { + "Moid": "{{ item.intersight_servers[0].Moid }}", + "ObjectType": "compute.RackUnit" + }, + "ClusterProfile": { + "Moid": "{{ profile.api_response.Moid }}" + } + } + when: item.intersight_servers is not none + loop: "{{ inventory.results }}" + loop_control: + index_var: idx + label: "{{ item.intersight_servers[0].Name }}" + +- debug: msg="HyperFlex Node Profiles have been created successfully." diff --git a/ansible_collections/cisco/intersight/playbooks/roles/policies/hyperflex_policies/proxy/tasks/main.yml b/ansible_collections/cisco/intersight/playbooks/roles/policies/hyperflex_policies/proxy/tasks/main.yml new file mode 100644 index 00000000..6023907a --- /dev/null +++ b/ansible_collections/cisco/intersight/playbooks/roles/policies/hyperflex_policies/proxy/tasks/main.yml @@ -0,0 +1,31 @@ +--- +- name: "Configure Proxy Policy" + vars: + # Create an anchor for api_info that can be used throughout the file + api_info: &api_info + api_private_key: "{{ api_private_key }}" + api_key_id: "{{ api_key_id }}" + api_uri: "{{ api_uri | default(omit) }}" + validate_certs: "{{ validate_certs | default(omit) }}" + state: "{{ state | default(omit) }}" + intersight_rest_api: + <<: *api_info + resource_path: /hyperflex/ProxySettingPolicies + query_params: + $filter: "Name eq '{{ hx_proxy_setting_policy }}'" + api_body: { + "Organization": { + "Moid": "{{ intersight_org.api_response.Moid }}" + }, + "Name":"{{ hx_proxy_setting_policy }}", + "Hostname":"{{ hx_proxy_setting_hostname }}", + "Port":"{{ hx_proxy_setting_port }}", + "ClusterProfiles": [ + { + "Moid": "{{ cluster_profile.api_response.Moid }}" + } + ] + } + register: proxy_setting + +- debug: msg="HyperFlex Proxy Policy named {{ hx_proxy_setting_policy }} has been created successfully." 
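The node_profiles role above names each profile from the loop index ('%02d' % (idx + 1)), so a two-entry hx_servers list with the prefix below yields hx-node-01 and hx-node-02. An illustrative vars sketch for the node_profiles and proxy roles (hostnames and values are placeholders):

# Illustrative inputs for the node_profiles and proxy roles
hx_cluster_name: hx-cluster
hx_node_profile_prefix: hx-node
hx_servers:
  - sjc07-r13-501
  - sjc07-r13-502
hx_proxy_setting_policy: hx-proxy
hx_proxy_setting_hostname: proxy.example.com
hx_proxy_setting_port: 8080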
diff --git a/ansible_collections/cisco/intersight/playbooks/roles/policies/hyperflex_policies/software_version/tasks/main.yml b/ansible_collections/cisco/intersight/playbooks/roles/policies/hyperflex_policies/software_version/tasks/main.yml new file mode 100644 index 00000000..878c1bf5 --- /dev/null +++ b/ansible_collections/cisco/intersight/playbooks/roles/policies/hyperflex_policies/software_version/tasks/main.yml @@ -0,0 +1,31 @@ +--- +- name: "Configure Software Version Policy" + vars: + # Create an anchor for api_info that can be used throughout the file + api_info: &api_info + api_private_key: "{{ api_private_key }}" + api_key_id: "{{ api_key_id }}" + api_uri: "{{ api_uri | default(omit) }}" + validate_certs: "{{ validate_certs | default(omit) }}" + state: "{{ state | default(omit) }}" + intersight_rest_api: + <<: *api_info + resource_path: /hyperflex/SoftwareVersionPolicies + query_params: + $filter: "Name eq '{{ hx_software_policy }}'" + api_body: { + "Organization": { + "Moid": "{{ intersight_org.api_response.Moid }}" + }, + "Name":"{{ hx_software_policy }}", + "HxdpVersion":"{{ hxdp_version }}", + "ServerFirmwareVersion":"{{ ucs_firmware_version }}", + "ClusterProfiles": [ + { + "Moid": "{{ cluster_profile.api_response.Moid }}" + } + ] + } + register: software_policy + +- debug: msg="HyperFlex Software Version Policy named {{ hx_software_policy }} has been created successfully." diff --git a/ansible_collections/cisco/intersight/playbooks/roles/policies/hyperflex_policies/sys_config/tasks/main.yml b/ansible_collections/cisco/intersight/playbooks/roles/policies/hyperflex_policies/sys_config/tasks/main.yml new file mode 100644 index 00000000..d5354c11 --- /dev/null +++ b/ansible_collections/cisco/intersight/playbooks/roles/policies/hyperflex_policies/sys_config/tasks/main.yml @@ -0,0 +1,33 @@ +--- +- name: "Configure System Config Policy" + vars: + # Create an anchor for api_info that can be used throughout the file + api_info: &api_info + api_private_key: "{{ api_private_key }}" + api_key_id: "{{ api_key_id }}" + api_uri: "{{ api_uri | default(omit) }}" + validate_certs: "{{ validate_certs | default(omit) }}" + state: "{{ state | default(omit) }}" + intersight_rest_api: + <<: *api_info + resource_path: /hyperflex/SysConfigPolicies + query_params: + $filter: "Name eq '{{ hx_sys_config_policy }}'" + api_body: { + "Organization": { + "Moid": "{{ intersight_org.api_response.Moid }}" + }, + "Name": "{{ hx_sys_config_policy }}", + "Timezone":"{{ hx_sys_config_timezone }}", + "DnsServers":"{{ hx_sys_config_dns_servers }}", + "NtpServers":"{{ hx_sys_config_ntp_servers }}", + "DnsDomainName":"{{ hx_sys_config_dns_domain }}", + "ClusterProfiles": [ + { + "Moid": "{{ cluster_profile.api_response.Moid }}" + } + ] + } + register: sys_config + +- debug: msg="HyperFlex System Config Policy named {{ hx_sys_config_policy }} has been created successfully." 
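The software_version and sys_config roles above round out the policy set; DnsServers and NtpServers are list fields in the API body, so the corresponding variables are assumed to be lists here. An illustrative vars sketch (placeholder values):

# Illustrative inputs for the software_version and sys_config roles
hx_software_policy: hx-software
hxdp_version: "4.5(2a)"
ucs_firmware_version: "4.1(2b)"
hx_sys_config_policy: hx-sys-config
hx_sys_config_timezone: America/Los_Angeles
hx_sys_config_dns_servers:
  - 192.168.1.2
  - 192.168.1.3
hx_sys_config_ntp_servers:
  - ntp1.example.com
hx_sys_config_dns_domain: example.com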
diff --git a/ansible_collections/cisco/intersight/playbooks/roles/policies/hyperflex_policies/vcenter/tasks/main.yml b/ansible_collections/cisco/intersight/playbooks/roles/policies/hyperflex_policies/vcenter/tasks/main.yml new file mode 100644 index 00000000..d7720e13 --- /dev/null +++ b/ansible_collections/cisco/intersight/playbooks/roles/policies/hyperflex_policies/vcenter/tasks/main.yml @@ -0,0 +1,33 @@ +--- +- name: "Configure vCenter Config Policy" + vars: + # Create an anchor for api_info that can be used throughout the file + api_info: &api_info + api_private_key: "{{ api_private_key }}" + api_key_id: "{{ api_key_id }}" + api_uri: "{{ api_uri | default(omit) }}" + validate_certs: "{{ validate_certs | default(omit) }}" + state: "{{ state | default(omit) }}" + intersight_rest_api: + <<: *api_info + resource_path: /hyperflex/VcenterConfigPolicies + query_params: + $filter: "Name eq '{{ hx_vcenter_config_policy }}'" + api_body: { + "Organization": { + "Moid": "{{ intersight_org.api_response.Moid }}" + }, + "Name":"{{ hx_vcenter_config_policy }}", + "Hostname":"{{ hx_vcenter_hostname }}", + "Username":"{{ hx_vcenter_username }}", + "Password":"{{ hx_vcenter_password | default(omit) }}", + "DataCenter":"{{ hx_vcenter_datacenter }}", + "ClusterProfiles": [ + { + "Moid": "{{ cluster_profile.api_response.Moid }}" + } + ] + } + register: vcenter + +- debug: msg="HyperFlex vCenter Config Policy named {{ hx_vcenter_config_policy }} has been created successfully." diff --git a/ansible_collections/cisco/intersight/playbooks/roles/policies/server_policies/tasks/main.yml b/ansible_collections/cisco/intersight/playbooks/roles/policies/server_policies/tasks/main.yml new file mode 100644 index 00000000..427b45d5 --- /dev/null +++ b/ansible_collections/cisco/intersight/playbooks/roles/policies/server_policies/tasks/main.yml @@ -0,0 +1,40 @@ +--- +- name: "Configure {{ api_body.Name }} Server Policy" + vars: + # Create an anchor for api_info that can be used throughout the file + api_info: &api_info + api_private_key: "{{ api_private_key }}" + api_key_id: "{{ api_key_id }}" + api_uri: "{{ api_uri | default(omit) }}" + validate_certs: "{{ validate_certs | default(omit) }}" + state: "{{ state | default(omit) }}" + cisco.intersight.intersight_rest_api: + <<: *api_info + resource_path: "{{ resource_path }}" + query_params: + $filter: "Name eq '{{ api_body.Name }}'" + api_body: "{{ api_body }}" + register: policy_resp +# Append profile_resp list to policy +- block: + # Create a list of all host's profile Moids + - set_fact: + # See the Ansible docs on json_query for info on how the Moid data is being extracted + profile_list: "{{ ansible_play_hosts | map('extract', hostvars, 'profile_resp') | list | json_query(moid_query) }}" + vars: + moid_query: "[*].api_response.{Moid: Moid, ObjectType: 'server.Profile'}" + - name: "Update Server Profiles used by {{ api_body.Name }} Server Policy (change may always be reported)" + cisco.intersight.intersight_rest_api: + <<: *api_info + resource_path: "{{ resource_path }}" + query_params: + $filter: "Name eq '{{ api_body.Name }}'" + api_body: { + "Profiles": "{{ profile_list + policy_resp.api_response.Profiles }}" + } + # Do not update if the profile isn't available + when: + - profile_resp is defined + - profile_resp.api_response.Moid is defined + - policy_resp is defined + - policy_resp.api_response.Profiles is defined diff --git a/ansible_collections/cisco/intersight/playbooks/roles/servers/actions/tasks/main.yml 
b/ansible_collections/cisco/intersight/playbooks/roles/servers/actions/tasks/main.yml new file mode 100644 index 00000000..0fb1ae9c --- /dev/null +++ b/ansible_collections/cisco/intersight/playbooks/roles/servers/actions/tasks/main.yml @@ -0,0 +1,30 @@ +--- +- name: "Configure {{ inventory_hostname }} power state" + vars: + # Create an anchor for api_info that can be used throughout the file + api_info: &api_info + api_private_key: "{{ api_private_key }}" + api_key_id: "{{ api_key_id }}" + api_uri: "{{ api_uri | default(omit) }}" + validate_certs: "{{ validate_certs | default(omit) }}" + state: "{{ state | default(omit) }}" + cisco.intersight.intersight_rest_api: + <<: *api_info + resource_path: /compute/ServerSettings + query_params: + $filter: "Server.Moid eq '{{ server_moid }}'" + api_body: { + "AdminPowerState": "{{ power_state }}" + } + when: power_state is defined +# Configure LED locator state +- name: "Configure {{ inventory_hostname }} locator state" + cisco.intersight.intersight_rest_api: + <<: *api_info + resource_path: /compute/ServerSettings + query_params: + $filter: "Server.Moid eq '{{ server_moid }}'" + api_body: { + "AdminLocatorLedState": "{{ locator_state }}" + } + when: locator_state is defined diff --git a/ansible_collections/cisco/intersight/playbooks/server_actions.yml b/ansible_collections/cisco/intersight/playbooks/server_actions.yml new file mode 100644 index 00000000..87b2477e --- /dev/null +++ b/ansible_collections/cisco/intersight/playbooks/server_actions.yml @@ -0,0 +1,23 @@ +--- +# +# The hosts group used is provided by the group variable or defaulted to 'Intersight_Servers'. +# You can specify a specific host (or host group) on the command line: +# ansible-playbook ... -e group=<your host group> +# e.g., ansible-playbook server_profiles.yml -e group=TME_Demo +# +- hosts: "{{ group | default('Intersight_Servers') }}" + connection: local + gather_facts: false + tasks: + - import_role: + name: servers/actions + vars: + # power and reset state + # options: Policy, PowerOn, PowerOff, PowerCycle, HardReset, Shutdown, Reboot + # Can override on the command line: ansible-playbook ... -e power_state=PowerCycle + power_state: PowerOn + # led locator state + # options: On, Off, None + # Can override on the command line: ansible-playbook ... -e locator_state=Off + # locator_state: On + delegate_to: localhost diff --git a/ansible_collections/cisco/intersight/playbooks/server_firmware.yml b/ansible_collections/cisco/intersight/playbooks/server_firmware.yml new file mode 100644 index 00000000..6888fa78 --- /dev/null +++ b/ansible_collections/cisco/intersight/playbooks/server_firmware.yml @@ -0,0 +1,115 @@ +--- +# +# The hosts group used is provided by the group variable or defaulted to 'Intersight_Servers'. +# You can specify a specific host (or host group) on the command line: +# ansible-playbook ... 
-e group=<your host group> +# e.g., ansible-playbook server_profiles.yml -e group=TME_Demo +# +- hosts: "{{ group | default('Intersight_Servers') }}" + connection: local + gather_facts: false + vars: + # Create an anchor for api_info that can be used throughout the file + api_info: &api_info + api_private_key: "{{ api_private_key }}" + api_key_id: "{{ api_key_id }}" + api_uri: "{{ api_uri | default(omit) }}" + validate_certs: "{{ validate_certs | default(omit) }}" + state: "{{ state | default(omit) }}" + fw_version: 4.1(2b) + file_share: 172.28.224.77/mnt/SHARE/ISOS/HUU + tasks: + # Edit FW to be used as needed for server type below + - set_fact: + file_name: "ucs-c220m4-huu-{{ fw_version | replace('(','.') | replace(')','') }}.iso" + supported_models: + - UCSC-C220-M4L + - UCSC-C220-M4S + when: model is search("UCSC-C220-M4.*") + - set_fact: + file_name: "ucs-c240m4-huu-{{ fw_version | replace('(','.') | replace(')','') }}.iso" + supported_models: + - UCSC-C240-M4L + - UCSC-C240-M4S + - UCSC-C240-M4SX + - UCSC-C240-M4SNEBS + - UCSC-C240-M4S2 + when: model is search("UCSC-C240-M4.*") + - set_fact: + file_name: "ucs-c240m5-huu-{{ fw_version | replace('(','.') | replace(')','') }}.iso" + supported_models: + - UCSC-C240-M5S + - UCSC-C240-M5L + - UCSC-C240-M5SX + - UCSC-C240-M5SN + - UCSC-C240-M5SD + - HX240C-M5SX + - HXAF240C-M5SX + - HX240C-M5L + - HX240C-M5SD + - HXAF240C-M5SD + when: model is search("UCSC-C240-M5.*") + - set_fact: + file_name: "ucs-c220m5-huu-{{ fw_version | replace('(','.') | replace(')','') }}.iso" + supported_models: + - UCSC-C220-M5SX + - UCSC-C220-M5L + - UCSC-C220-M5SN + - HX220C-M5SX + - HXAF220C-M5SX + when: model is search("UCSC-C220-M5.*") + - set_fact: + file_location: "{{ file_share }}/{{ file_name }}" + # Set the distributable type based on the management mode and server type + - set_fact: + dist_type: STANDALONE + when: mode == 'Intersight' or mode == 'IntersightStandalone' + # Get a user defined FW version + - name: Get Moid of user defined FW version + intersight_rest_api: + <<: *api_info + resource_path: /firmware/Distributables + query_params: + $filter: "FileLocation eq '{{ file_location }}'" + update_method: post + api_body: { + "Catalog": { + "Moid": "5cd993686567612d30aaa762" + }, + "ImportAction": "None", + "Name": "{{ file_name }}", + "Origin": "User", + "Source": { + "ObjectType": "softwarerepository.NfsServer", + "FileLocation": "{{ file_location }}" + }, + "SupportedModels": "{{ supported_models }}", + "Version": "{{ fw_version }}" + } + delegate_to: localhost + register: fw_resp + - name: Update server firmware + cisco.intersight.intersight_rest_api: + <<: *api_info + resource_path: /firmware/Upgrades + query_params: + $filter: "Server.Moid eq '{{ server_moid }}'" + update_method: post + # nw_upgrade_full supported in UI, nw_upgrade_mount_only has partial API support + api_body: { + "UpgradeType": "network_upgrade", + "Distributable": { + "Moid": "{{ fw_resp.api_response.Moid }}" + }, + "DirectDownload": {}, + "NetworkShare": { + "Upgradeoption": "nw_upgrade_mount_only", + "MapType":"nfs" + }, + "Server": { + "ObjectType": "compute.{{ object_type }}", + "Moid": "{{ server_moid }}" + } + } + delegate_to: localhost + when: server_moid is defined diff --git a/ansible_collections/cisco/intersight/playbooks/servers_to_file.yml b/ansible_collections/cisco/intersight/playbooks/servers_to_file.yml new file mode 100644 index 00000000..72c88783 --- /dev/null +++ b/ansible_collections/cisco/intersight/playbooks/servers_to_file.yml @@ -0,0 +1,14 @@ +--- +# 
Update standalone servers (IMC) in the file +- lineinfile: + path: "{{ filepath }}" + insertafter: "^\\[{{ host_group }}\\]" + regexp: "^{{ item.Name }} " + # Each line of the inventory has the following: + # Name server_moid=<Moid value> model=<Model value> ... + line: "{{ item.Name }} server_moid={{ item.Moid }} model={{ item.Model }}" + create: true + loop: "{{ outer_item.api_response }}" + loop_control: + label: "{{ item.Name }}" + when: outer_item.api_response is defined and outer_item.api_response diff --git a/ansible_collections/cisco/intersight/playbooks/update_all_inventory.yml b/ansible_collections/cisco/intersight/playbooks/update_all_inventory.yml new file mode 100644 index 00000000..f2b59349 --- /dev/null +++ b/ansible_collections/cisco/intersight/playbooks/update_all_inventory.yml @@ -0,0 +1,57 @@ +--- +# +# Summary: Auto generate (or update) the Ansible inventory file with all servers (Name and Moid of each discovered server) +# +# The hosts group used is provided by the group variable or defaulted to 'Intersight'. +# You can specify a specific host (or host group) on the command line: +# ansible-playbook ... -e group=<your host group> +# e.g., ansible-playbook server_profiles.yml -e group=TME_Demo +# +# This playbook only runs once (and not for each server in the inventory), but the hosts group is used to get API key info +# +- hosts: "{{ group | default('Intersight') }}" + connection: local + gather_facts: false + vars: + # Create an anchor for api_info that can be used throughout the file + api_info: &api_info + # if api_key vars are omitted, INTERSIGHT_API_KEY_ID, INTERSIGHT_API_PRIVATE_KEY, + # and INTERSIGHT_API_URI environment variables used for API key data + api_private_key: "{{ api_private_key | default(omit) }}" + api_key_id: "{{ api_key_id | default(omit) }}" + api_uri: "{{ api_uri | default(omit) }}" + validate_certs: "{{ validate_certs | default(omit) }}" + state: "{{ state | default(omit) }}" + # Change filepath if you want to update a different inventory file + filepath: "{{ inventory_file }}" + # Change host_group if you want to use another group name for your servers in the created inventory + host_group: Intersight_Servers + tasks: + # Enclose tasks in a block that is only run once + - block: + # Find all servers + - cisco.intersight.intersight_info: + <<: *api_info + server_names: + register: all_results + # Place the servers in a group in the file + - debug: + msg: Inventory filepath "{{ filepath }}" + - lineinfile: + path: "{{ filepath }}" + line: "[{{ host_group }}]" + create: true + # Update servers in the file + - lineinfile: + path: "{{ filepath }}" + insertafter: "^\\[{{ host_group }}\\]" + regexp: "^{{ item.Name }} serial={{ item.Serial }} " + # Each line of the inventory has the following: + line: "{{ item.Name }} serial={{ item.Serial }} server_moid={{ item.Moid }} model={{ item.Model }} mode={{ item.ManagementMode }} object_type={{ item.SourceObjectType | regex_replace('compute.')}}" + create: true + loop: "{{ all_results.intersight_servers }}" + loop_control: + label: "{{ item.Name }}" + when: all_results.intersight_servers is defined + delegate_to: localhost + run_once: true diff --git a/ansible_collections/cisco/intersight/playbooks/update_hx_edge_inventory.yml b/ansible_collections/cisco/intersight/playbooks/update_hx_edge_inventory.yml new file mode 100644 index 00000000..6bc855ab --- /dev/null +++ b/ansible_collections/cisco/intersight/playbooks/update_hx_edge_inventory.yml @@ -0,0 +1,61 @@ +--- +# +# Summary: Auto generate (or update) 
diff --git a/ansible_collections/cisco/intersight/playbooks/update_hx_edge_inventory.yml b/ansible_collections/cisco/intersight/playbooks/update_hx_edge_inventory.yml
new file mode 100644
index 00000000..6bc855ab
--- /dev/null
+++ b/ansible_collections/cisco/intersight/playbooks/update_hx_edge_inventory.yml
@@ -0,0 +1,61 @@
+---
+#
+# Summary: Auto generate (or update) the Ansible inventory file with HyperFlex Edge servers (Name and Moid of each discovered server)
+#
+# The hosts group used is provided by the group variable or defaulted to 'Intersight'.
+# You can specify a specific host (or host group) on the command line:
+#   ansible-playbook ... -e group=<your host group>
+#   e.g., ansible-playbook server_profiles.yml -e group=TME_Demo
+#
+# This playbook only runs once (and not for each server in the inventory), but the hosts group is used to get API key info
+#
+- hosts: "{{ group | default('Intersight') }}"
+  connection: local
+  gather_facts: false
+  vars:
+    # Create an anchor for api_info that can be used throughout the file
+    api_info: &api_info
+      api_private_key: "{{ api_private_key }}"
+      api_key_id: "{{ api_key_id }}"
+      api_uri: "{{ api_uri | default(omit) }}"
+      validate_certs: "{{ validate_certs | default(omit) }}"
+      state: "{{ state | default(omit) }}"
+    # Change filepath if you want to update a different inventory file
+    filepath: "{{ inventory_file }}"
+    # Change host_group if you want to use another group name for your servers in the created inventory
+    host_group: Intersight_Servers
+  tasks:
+    # Enclose tasks in a block that is only run once
+    - block:
+        # Find all servers
+        - cisco.intersight.intersight_info:
+            <<: *api_info
+            server_names:
+          register: all_results
+        # Place the servers in a group in the file
+        - debug:
+            msg: Inventory filepath "{{ filepath }}"
+        - lineinfile:
+            path: "{{ filepath }}"
+            line: "[{{ host_group }}]"
+            create: true
+        # Update servers in the file
+        - lineinfile:
+            path: "{{ filepath }}"
+            insertafter: "^\\[{{ host_group }}\\]"
+            regexp: "^{{ item.Name }} serial={{ item.Serial }} "
+            # Each line of the inventory has the following:
+            # Name serial=<Serial> server_moid=<Moid> model=<Model>
+            line: "{{ item.Name }} serial={{ item.Serial }} server_moid={{ item.Moid }} model={{ item.Model }}"
+            create: true
+          # Ansible and jmespath contains() have type differences, so to_json/from_json is used
+          loop: "{{ all_results.intersight_servers | json_query(platform_query) | to_json | from_json | json_query(model_query) }}"
+          loop_control:
+            label: "{{ item.Name }}"
+          vars:
+            # Filter for HX Edge: IMC-managed (standalone) servers with HX models
+            platform_query: "[?PlatformType=='IMC']"
+            model_query: "[?contains(Model, 'HX')]"
+          when: all_results.intersight_servers is defined
+      delegate_to: localhost
+      run_once: true
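
Editor's note: to make the two-stage json_query filter concrete, here is a minimal sketch (the sample data and the debug task are hypothetical, not part of the playbook) showing what each stage keeps:

    - debug:
        msg: "{{ sample | json_query(\"[?PlatformType=='IMC']\") | to_json | from_json | json_query(\"[?contains(Model, 'HX')]\") }}"
      vars:
        sample:
          - { Name: hx-edge-1, PlatformType: IMC, Model: HX220C-M5SX }
          - { Name: rack-1, PlatformType: IMC, Model: UCSC-C220-M5SX }
          - { Name: hx-fi-1, PlatformType: UCSFI, Model: HXAF220C-M5SX }
      # Stage 1 keeps hx-edge-1 and rack-1 (IMC platform); stage 2 keeps only hx-edge-1 (HX model)

The to_json/from_json round trip is needed because Ansible templates strings to its own AnsibleUnicode type, which the jmespath contains() function does not treat as a string.
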
diff --git a/ansible_collections/cisco/intersight/playbooks/update_hx_inventory.yml b/ansible_collections/cisco/intersight/playbooks/update_hx_inventory.yml
new file mode 100644
index 00000000..cb3e25ac
--- /dev/null
+++ b/ansible_collections/cisco/intersight/playbooks/update_hx_inventory.yml
@@ -0,0 +1,61 @@
+---
+#
+# Summary: Auto generate (or update) the Ansible inventory file with FI-attached HyperFlex servers (Name and Moid of each discovered server)
+#
+# The hosts group used is provided by the group variable or defaulted to 'Intersight'.
+# You can specify a specific host (or host group) on the command line:
+#   ansible-playbook ... -e group=<your host group>
+#   e.g., ansible-playbook server_profiles.yml -e group=TME_Demo
+#
+# This playbook only runs once (and not for each server in the inventory), but the hosts group is used to get API key info
+#
+- hosts: "{{ group | default('Intersight') }}"
+  connection: local
+  gather_facts: false
+  vars:
+    # Create an anchor for api_info that can be used throughout the file
+    api_info: &api_info
+      api_private_key: "{{ api_private_key }}"
+      api_key_id: "{{ api_key_id }}"
+      api_uri: "{{ api_uri | default(omit) }}"
+      validate_certs: "{{ validate_certs | default(omit) }}"
+      state: "{{ state | default(omit) }}"
+    # Change filepath if you want to update a different inventory file
+    filepath: "{{ inventory_file }}"
+    # Change host_group if you want to use another group name for your servers in the created inventory
+    host_group: Intersight_Servers
+  tasks:
+    # Enclose tasks in a block that is only run once
+    - block:
+        # Find all servers
+        - cisco.intersight.intersight_info:
+            <<: *api_info
+            server_names:
+          register: all_results
+        # Place the servers in a group in the file
+        - debug:
+            msg: Inventory filepath "{{ filepath }}"
+        - lineinfile:
+            path: "{{ filepath }}"
+            line: "[{{ host_group }}]"
+            create: true
+        # Update servers in the file
+        - lineinfile:
+            path: "{{ filepath }}"
+            insertafter: "^\\[{{ host_group }}\\]"
+            regexp: "^{{ item.Name }} serial={{ item.Serial }} "
+            # Each line of the inventory has the following:
+            # Name serial=<Serial> server_moid=<Moid> model=<Model>
+            line: "{{ item.Name }} serial={{ item.Serial }} server_moid={{ item.Moid }} model={{ item.Model }}"
+            create: true
+          # Ansible and jmespath contains() have type differences, so to_json/from_json is used
+          loop: "{{ all_results.intersight_servers | json_query(platform_query) | to_json | from_json | json_query(model_query) }}"
+          loop_control:
+            label: "{{ item.Name }}"
+          vars:
+            # Filter for FI-attached HX servers
+            platform_query: "[?PlatformType=='UCSFI']"
+            model_query: "[?contains(Model, 'HX')]"
+          when: all_results.intersight_servers is defined
+      delegate_to: localhost
+      run_once: true
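
Editor's note: the only functional difference from update_hx_edge_inventory.yml is the platform_query value: 'UCSFI' selects fabric-interconnect-attached servers, while 'IMC' selects standalone (Edge) servers. Both playbooks write to the same inventory file by default; a hypothetical pair of runs that keeps the two sets separate could override the file path on the command line (extra vars take precedence over the play-level filepath default):

    ansible-playbook -i inventory update_hx_inventory.yml -e filepath=./hx_fi_inventory
    ansible-playbook -i inventory update_hx_edge_inventory.yml -e filepath=./hx_edge_inventory
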
diff --git a/ansible_collections/cisco/intersight/playbooks/update_standalone_inventory.yml b/ansible_collections/cisco/intersight/playbooks/update_standalone_inventory.yml
new file mode 100644
index 00000000..bd0bc166
--- /dev/null
+++ b/ansible_collections/cisco/intersight/playbooks/update_standalone_inventory.yml
@@ -0,0 +1,67 @@
+---
+#
+# Summary: Auto generate (or update) the Ansible inventory file with Standalone C-Series servers (Name and Moid of each discovered server)
+#
+# The hosts group used is provided by the group variable or defaulted to 'Intersight'.
+# You can specify a specific host (or host group) on the command line:
+#   ansible-playbook ... -e group=<your host group>
+#   e.g., ansible-playbook server_profiles.yml -e group=TME_Demo
+#
+# This playbook only runs once (and not for each server in the inventory), but the hosts group is used to get API key info
+#
+- hosts: "{{ group | default('Intersight') }}"
+  connection: local
+  gather_facts: false
+  vars:
+    # Create an anchor for api_info that can be used throughout the file
+    api_info: &api_info
+      # If the api_key vars are omitted, the INTERSIGHT_API_KEY_ID, INTERSIGHT_API_PRIVATE_KEY,
+      # and INTERSIGHT_API_URI environment variables are used for API key data
+      api_private_key: "{{ api_private_key | default(omit) }}"
+      api_key_id: "{{ api_key_id | default(omit) }}"
+      api_uri: "{{ api_uri | default(omit) }}"
+      validate_certs: "{{ validate_certs | default(omit) }}"
+      state: "{{ state | default(omit) }}"
+    # How many results to return per page (API request)
+    per_page: 500
+    # Upper bound on the total number of servers to query
+    max_servers: 15000
+    # Change filepath if you want to update a different inventory file
+    filepath: "{{ inventory_file }}"
+    # Change host_group if you want to use another group name for your servers in the created inventory
+    host_group: Intersight_Servers
+  tasks:
+    # Enclose tasks in a block that is only run once
+    - block:
+        # Seed api_response so the paged query below runs on its 1st loop iteration
+        - set_fact:
+            servers:
+              api_response:
+                - Moid: fake
+          run_once: true
+        # Find all servers one page per iteration; the when condition checks the previous page and stops queries once a page is empty
+        - cisco.intersight.intersight_rest_api:
+            <<: *api_info
+            resource_path: /compute/PhysicalSummaries
+            query_params:
+              $filter: "ManagementMode eq 'IntersightStandalone' and contains(Model, 'UCSC-C')"
+              $select: Name,Model,Serial
+              $top: "{{ per_page }}"
+              $skip: "{{ item }}"
+            return_list: true
+          loop: "{{ range(0, max_servers|int, per_page|int) | list }}"
+          register: servers
+          when: servers.api_response
+        # Place the servers in a group in the file
+        - debug:
+            msg: Inventory filepath "{{ filepath }}"
+        - lineinfile:
+            path: "{{ filepath }}"
+            line: "[{{ host_group }}]"
+            create: true
+        - include_tasks: servers_to_file.yml
+          loop: "{{ servers.results }}"
+          loop_control:
+            loop_var: outer_item
+      delegate_to: localhost
+      run_once: true
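
Editor's note: with the defaults above, the loop issues up to 30 paged requests; a sketch of the generated $skip values and the resulting queries (same endpoint and parameters as in the task above):

    # range(0, 15000, 500) -> [0, 500, 1000, ..., 14500]
    # iteration 1: GET /compute/PhysicalSummaries?$top=500&$skip=0
    # iteration 2: GET /compute/PhysicalSummaries?$top=500&$skip=500
    # ... stops early (per the when condition) once a page returns an empty api_response

Each page's result lands in servers.results, which servers_to_file.yml then consumes one page (outer_item) at a time.
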
diff --git a/ansible_collections/cisco/intersight/playbooks/vault_intersight_server_profile.yml b/ansible_collections/cisco/intersight/playbooks/vault_intersight_server_profile.yml
new file mode 100644
index 00000000..7a9fd555
--- /dev/null
+++ b/ansible_collections/cisco/intersight/playbooks/vault_intersight_server_profile.yml
@@ -0,0 +1,88 @@
+---
+#
+# Configure Server Profiles
+#
+# The hosts group used is provided by the group variable or defaulted to 'Intersight_Servers'.
+# You can specify a specific host (or host group) on the command line:
+#   ansible-playbook ... -e group=<your host group>
+#   e.g., ansible-playbook server_profiles.yml -e group=TME_Demo
+#
+- hosts: "{{ group | default('Intersight_Servers') }}"
+  connection: local
+  collections:
+    - cisco.intersight
+  gather_facts: false
+  vars:
+    # Create an anchor for api_info that can be used throughout the file
+    api_info: &api_info
+      # The key can be specified directly, and vault should be used to encrypt it:
+      # Ex. ansible-vault encrypt_string --vault-id tme@/Users/dsoper/Documents/vault_password_file '-----BEGIN EC PRIVATE KEY-----
+      #     <your private key data>
+      #     -----END EC PRIVATE KEY-----'
+      # To use with vault:
+      #   ansible-playbook -i inventory --vault-id tme@vault_password_file vault_intersight_server_profile.yml
+      api_private_key: !vault |
+        $ANSIBLE_VAULT;1.2;AES256;tme
+        34376535353966373536386366646435643735636364373163343365623465343466393338386331
+        3135633161333861386265393631616237623236643263620a613363396362386631613863643364
+        65376635316232613561373761363633633034346138366165356561666462333562643065393332
+        6631363239333332640a376632376434366461393039663530386161313864633265353839636337
+        39393939363535376566333565666537666137366537396639623633643665363066646161633833
+        35656430366665336334383435326239316333323631306237626432636361356166383466656362
+        36626566643637366264393933353038653062373035306338663730383739336530313664646162
+        30623337383832306665356433346331656164366638633563396532313463643032366537666639
+        32383230633135373764623733653261326536626561656462343565613535386331643365343738
+        62623631383135623539393538396435623064306636323165623661633466373664326130396663
+        31333163643763616263623566353565363030383761366566613036616163343530663362313131
+        32643737653063383330356436303437383966366163383461376236363563313264303833653631
+        62613432303536386630646166346262636566303563646337653166303937333134356537656630
+        39303363383262376237366565346638336139346363383634623333356639616538303366616634
+        35666439356634353530363566313864333966386263623566323564656366356264313166353038
+        66643566313361636231616338633939323131643061646664396264366538386230366364326633
+        3831
+      api_key_id: "{{ api_key_id | default(omit) }}"
+      api_uri: "{{ api_uri | default(omit) }}"
+      validate_certs: "{{ validate_certs | default(omit) }}"
+      state: "{{ state | default(omit) }}"
+    # Server Profile name default
+    profile_name: "SP-{{ inventory_hostname }}"
+  tasks:
+    #
+    # Configure profiles specific to server (run for each server in the inventory)
+    # Map inventory ManagementMode values to Server Profile target_platform values
+    - set_fact:
+        mode: Standalone
+      when: mode is not defined or mode == 'IntersightStandalone'
+    - set_fact:
+        mode: FIAttached
+      when: mode == 'Intersight'
+    # Get server moid when not defined in inventory
+    - block:
+        - name: "Get {{ inventory_hostname }} Server Moid"
+          cisco.intersight.intersight_info:
+            <<: *api_info
+            server_names: "{{ inventory_hostname }}"
+          register: server
+        - set_fact:
+            server_moid: "{{ server.intersight_servers[0].Moid }}"
+      when: server_moid is not defined
+      delegate_to: localhost
+    - name: "Configure {{ profile_name }} Server Profile"
+      intersight_server_profile:
+        <<: *api_info
+        organization: "{{ organization | default(omit) }}"
+        name: "{{ profile_name }}"
+        target_platform: "{{ mode | default(omit) }}"
+        tags:
+          - Key: Site
+            Value: SJC02
+        description: "Updated Profile for server name {{ inventory_hostname }}"
+        assigned_server: "{{ server_moid }}"
+        boot_order_policy: "{{ boot_order_policy | default(omit) }}"
+        imc_access_policy: "{{ imc_access_policy | default(omit) }}"
+        lan_connectivity_policy: "{{ lan_connectivity_policy | default(omit) }}"
+        local_user_policy: "{{ local_user_policy | default(omit) }}"
+        ntp_policy: "{{ ntp_policy | default(omit) }}"
+        virtual_media_policy: "{{ virtual_media_policy | default(omit) }}"
+      when: server_moid is defined
+      delegate_to: localhost
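
Editor's note: a sketch of running the vaulted playbook while overriding one of the optional policy parameters on the command line (the inventory path and the policy name DevNet-Boot are hypothetical):

    ansible-playbook -i inventory --vault-id tme@vault_password_file \
        vault_intersight_server_profile.yml -e boot_order_policy=DevNet-Boot

Any *_policy parameter left undefined resolves to omit and is simply not sent with the module arguments.
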