path: root/docs/labs/lab06-provisioning
author     Daniel Baumann <daniel.baumann@progress-linux.org>  2023-05-11 09:04:53 +0000
committer  Daniel Baumann <daniel.baumann@progress-linux.org>  2023-05-13 09:34:21 +0000
commit     47553c43d71b7b1144f912ab9679f5b60e858fa2 (patch)
tree       08378beaeeea8f9bb2686d3037c7b6f5062bb948 /docs/labs/lab06-provisioning
parent     Initial commit. (diff)
Adding upstream version 1.3.1+dfsg. (tag: upstream/1.3.1+dfsg)
Signed-off-by: Daniel Baumann <daniel.baumann@progress-linux.org>
Diffstat (limited to 'docs/labs/lab06-provisioning')
-rw-r--r--  docs/labs/lab06-provisioning/atd_e2e_provisioning_workflow.py   120
-rw-r--r--  docs/labs/lab06-provisioning/auto_reconcile_on_rc_change.py      64
-rw-r--r--  docs/labs/lab06-provisioning/change_control_custom_rapi.py       81
-rw-r--r--  docs/labs/lab06-provisioning/change_control_workflow.py          27
-rw-r--r--  docs/labs/lab06-provisioning/change_control_workflow_rapi.py     40
-rw-r--r--  docs/labs/lab06-provisioning/configlets/AVD_leaf1.cfg           255
-rw-r--r--  docs/labs/lab06-provisioning/configlets/AVD_leaf2.cfg           255
-rw-r--r--  docs/labs/lab06-provisioning/configlets/AVD_leaf3.cfg           255
-rw-r--r--  docs/labs/lab06-provisioning/configlets/AVD_leaf4.cfg           255
-rw-r--r--  docs/labs/lab06-provisioning/configlets/AVD_spine1.cfg          129
-rw-r--r--  docs/labs/lab06-provisioning/configlets/AVD_spine2.cfg          129
-rw-r--r--  docs/labs/lab06-provisioning/gen_builder.py                      63
-rw-r--r--  docs/labs/lab06-provisioning/mlag_issu.py                       220
-rw-r--r--  docs/labs/lab06-provisioning/move_device.py                      24
-rw-r--r--  docs/labs/lab06-provisioning/vc_task_retrigger.py               115
15 files changed, 2032 insertions, 0 deletions
diff --git a/docs/labs/lab06-provisioning/atd_e2e_provisioning_workflow.py b/docs/labs/lab06-provisioning/atd_e2e_provisioning_workflow.py
new file mode 100644
index 0000000..8d4445f
--- /dev/null
+++ b/docs/labs/lab06-provisioning/atd_e2e_provisioning_workflow.py
@@ -0,0 +1,120 @@
+# Copyright (c) 2021 Arista Networks, Inc.
+# Use of this source code is governed by the Apache License 2.0
+# that can be found in the COPYING file.
+
+# This script is an example of provisioning registered devices in CloudVision. It is based on
+# Arista Test Drive (ATD) and similar to what the Ansible playbooks do in
+# https://github.com/arista-netdevops-community/atd-avd.
+# It does the following:
+# - creates and uploads configlets
+# - creates the container hierarchy in Network Provisioning
+# - moves the devices to their target containers
+# - assigns the configlets to the devices
+# - creates a change control from the generated tasks
+# - approves and executes the change control
+
+import uuid
+import time
+import ssl
+from datetime import datetime
+from cvprac.cvp_client import CvpClient
+ssl._create_default_https_context = ssl._create_unverified_context
+
+# Create connection to CloudVision
+clnt = CvpClient()
+clnt.connect(['cvp1'],'username', 'password')
+
+# Create container topology
+container_name = "DC1_LEAFS"
+container_topology = [{"containerName": "ATD_FABRIC", "parentContainerName": 'Tenant'},
+ {"containerName": "ATD_LEAFS", "parentContainerName": 'ATD_FABRIC'},
+ {"containerName": "pod1", "parentContainerName": 'ATD_LEAFS'},
+ {"containerName": "pod2", "parentContainerName": 'ATD_LEAFS'},
+ {"containerName": "ATD_SERVERS", "parentContainerName": 'ATD_FABRIC'},
+ {"containerName": "ATD_SPINES", "parentContainerName": 'ATD_FABRIC'},
+ {"containerName": "ATD_TENANT_NETWORKS", "parentContainerName": 'ATD_FABRIC'}]
+for container in container_topology:
+ try:
+ container_name = container['containerName']
+ # Get parent container information
+ parent = clnt.api.get_container_by_name(container['parentContainerName'])
+ print(f'Creating container {container_name}\n')
+ clnt.api.add_container(container_name,parent["name"],parent["key"])
+ except Exception as e:
+ if "Data already exists in Database" in str(e):
+ print("Container already exists, continuing...")
+
+# Create device mappers
+devices = [{'deviceName': "leaf1",
+ 'configlets': ["BaseIPv4_Leaf1", "AVD_leaf1"],
+ "parentContainerName": "pod1"},
+ {'deviceName': "leaf2",
+ 'configlets': ["BaseIPv4_Leaf2", "AVD_leaf2"],
+ "parentContainerName": "pod1"},
+ {'deviceName': "leaf3",
+ 'configlets': ["BaseIPv4_Leaf3", "AVD_leaf3"],
+ "parentContainerName": "pod2"},
+ {'deviceName': "leaf4",
+ 'configlets': ["BaseIPv4_Leaf4", "AVD_leaf4"],
+ "parentContainerName": "pod2"},
+ {'deviceName': "spine1",
+ 'configlets': ["BaseIPv4_Spine1", "AVD_spine1"],
+ "parentContainerName": "ATD_SPINES"},
+ {'deviceName': "spine2",
+ 'configlets': ["BaseIPv4_Spine2", "AVD_spine2"],
+ "parentContainerName": "ATD_SPINES"}]
+
+task_list = []
+for device in devices:
+ # Load the AVD configlets from file
+ with open("./configlets/AVD_" + device['deviceName'] + ".cfg", "r") as file:
+ configlet_file = file.read()
+ avd_configlet_name = device['configlets'][1]
+ base_configlet_name = device['configlets'][0] # preloaded configlet in an ATD environment
+ container_name = device['parentContainerName']
+ base_configlet = clnt.api.get_configlet_by_name(base_configlet_name)
+ configlets = [base_configlet]
+ # Update the AVD configlets if they exist, otherwise upload them from the configlets folder
+ print(f"Creating configlet {avd_configlet_name} for {device['deviceName']}\n")
+ try:
+ configlet = clnt.api.get_configlet_by_name(avd_configlet_name)
+ clnt.api.update_configlet(configlet_file, configlet['key'], avd_configlet_name)
+ configlets.append(configlet)
+ except Exception:
+ clnt.api.add_configlet(avd_configlet_name, configlet_file)
+ configlet = clnt.api.get_configlet_by_name(avd_configlet_name)
+ configlets.append(configlet)
+ # Get device data
+ device_data = clnt.api.get_device_by_name(device['deviceName'] + ".atd.lab")
+ # Get the parent container data for the device
+ container = clnt.api.get_container_by_name(container_name)
+ device_name = device['deviceName']
+ print(f"Moving device {device_name} to container {container_name}\n")
+ # The move action creates the task first. However, if the devices are already in the target
+ # container (for instance, if the script was run multiple times), the move action will no
+ # longer generate a task. It is therefore better to build the task list from the
+ # Update Config action, which reuses the Move Device action's task if one exists and
+ # otherwise creates a new one.
+ move = clnt.api.move_device_to_container("python", device_data, container)
+ apply_configlets = clnt.api.apply_configlets_to_device("", device_data, configlets)
+ task_list = task_list + apply_configlets['data']['taskIds']
+
+print(f"Generated task IDs are: {task_list}\n")
+
+# Generate unique ID for the change control
+cc_id = str(uuid.uuid4())
+cc_name = f"Change_{datetime.now().strftime('%Y%m%d_%H%M%S')}"
+
+print("Creating Change control with the list of tasks")
+clnt.api.change_control_create_for_tasks(cc_id, cc_name, task_list, series=False)
+
+print("Approving Change Control")
+# sleep a few seconds to avoid a small time difference between the local system and CVP
+time.sleep(2)
+approve_note = "Approving CC via cvprac"
+clnt.api.change_control_approve(cc_id, notes=approve_note)
+
+# Start the change control
+print("Executing Change Control...")
+start_note = "Start the CC via cvprac"
+clnt.api.change_control_start(cc_id, notes=start_note)
diff --git a/docs/labs/lab06-provisioning/auto_reconcile_on_rc_change.py b/docs/labs/lab06-provisioning/auto_reconcile_on_rc_change.py
new file mode 100644
index 0000000..cff820d
--- /dev/null
+++ b/docs/labs/lab06-provisioning/auto_reconcile_on_rc_change.py
@@ -0,0 +1,64 @@
+# Copyright (c) 2022 Arista Networks, Inc.
+# Use of this source code is governed by the Apache License 2.0
+# that can be found in the COPYING file.
+
+# This script can be run as a cronjob to periodically reconcile all devices
+# that are out of configuration compliance in environments where the running-config
+# is still frequently modified via the CLI.
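+#
+# A minimal sketch of a cron entry for this (the path, interval and interpreter are
+# assumptions -- adjust them to the environment):
+#
+#   */15 * * * * cd /opt/cvprac-labs && /usr/bin/python3 auto_reconcile_on_rc_change.py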
+from cvprac.cvp_client import CvpClient
+import ssl
+from datetime import datetime
+ssl._create_default_https_context = ssl._create_unverified_context
+import requests.packages.urllib3
+requests.packages.urllib3.disable_warnings()
+clnt = CvpClient()
+clnt.set_log_level(log_level='WARNING')
+
+# Reading the service account token from a file
+with open("token.tok") as f:
+ token = f.read().strip('\n')
+
+clnt.connect(nodes=['cvp1'], username='', password='', api_token=token)
+
+inventory = clnt.api.get_inventory()
+
+compliance = {"0001": "Config is out of sync",
+ "0003": "Config & image out of sync",
+ "0004": "Config, Image and Device time are in sync",
+ "0005": "Device is not reachable",
+ "0008": "Config, Image and Extensions are out of sync",
+ "0009": "Config and Extensions are out of sync",
+ "0012": "Config, Image, Extension and Device time are out of sync",
+ "0013": "Config, Image and Device time are out of sync",
+ "0014": "Config, Extensions and Device time are out of sync",
+ "0016": "Config and Device time are out of sync"
+ }
+
+non_compliants = []
+taskIds = []
+for device in inventory:
+ if device['complianceCode'] in compliance.keys():
+ # create a list of non-compliant devices for reporting purposes
+ non_compliants.append(device['hostname'])
+ dev_mac = device['systemMacAddress']
+ # check if device already has reconciled config and save the key if it does
+ try:
+ configlets = clnt.api.get_configlets_by_device_id(dev_mac)
+ for configlet in configlets:
+ if configlet['reconciled']:
+ configlet_key = configlet['key']
+ break
+ else:
+ configlet_key = ""
+ rc = clnt.api.get_device_configuration(dev_mac)
+ name = 'RECONCILE_' + device['serialNumber']
+ update = clnt.api.update_reconcile_configlet(dev_mac, rc, configlet_key, name, True)
+ # if the device had no reconciled config, it means we need to append the reconciled
+ # configlet to the list of applied configlets on the device; the task generated by that
+ # action is cancelled right away, since the reconciled configlet already matches the
+ # running-config and nothing needs to be pushed
+ if configlet_key == "":
+ addcfg = clnt.api.apply_configlets_to_device("auto-reconciling",device,[update['data']])
+ clnt.api.cancel_task(addcfg['data']['taskIds'][0])
+ except Exception as e:
+ continue
+print(f"The non-compliant devices were: {str(non_compliants)}")
diff --git a/docs/labs/lab06-provisioning/change_control_custom_rapi.py b/docs/labs/lab06-provisioning/change_control_custom_rapi.py
new file mode 100644
index 0000000..0290af7
--- /dev/null
+++ b/docs/labs/lab06-provisioning/change_control_custom_rapi.py
@@ -0,0 +1,81 @@
+# Copyright (c) 2021 Arista Networks, Inc.
+# Use of this source code is governed by the Apache License 2.0
+# that can be found in the COPYING file.
+#
+# NOTE: The following example uses the new Change Control Resource APIs, supported in CVP 2021.2.0 or newer and in CVaaS.
+# For CVaaS, service-account token-based authentication has to be used.
+
+from cvprac.cvp_client import CvpClient
+import ssl
+import uuid
+from datetime import datetime
+ssl._create_default_https_context = ssl._create_unverified_context
+import requests.packages.urllib3
+requests.packages.urllib3.disable_warnings()
+
+# Create connection to CloudVision
+clnt = CvpClient()
+clnt.connect(['cvp1'],'username', 'password')
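+# For CVaaS (see the NOTE above), a service-account token has to be used instead of
+# username/password. A minimal sketch, reusing the token-file pattern from
+# auto_reconcile_on_rc_change.py in this lab (the token file name is an assumption):
+#
+# with open("token.tok") as f:
+#     token = f.read().strip('\n')
+# clnt.connect(nodes=['cvp1'], username='', password='', api_token=token)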
+
+
+cc_id = str(uuid.uuid4())
+name = f"Change_{datetime.now().strftime('%Y%m%d_%H%M%S')}"
+
+# Create custom stage hierarchy
+# The below example would result in the following hierarchy:
+# root (series)
+# |- stages 1-2 (series)
+# | |- stage 1ab (parallel)
+# | | |- stage 1a
+# | | |- stage 1b
+# | |- stage 2
+# |- stage 3
+data = {'key': {
+ 'id': cc_id
+ },
+ 'change': {
+ 'name': name,
+ 'notes': 'cvprac CC',
+ 'rootStageId': 'root',
+ 'stages': {'values': {'root': {'name': 'root',
+ 'rows': {'values': [{'values': ['1-2']},
+ {'values': ['3']}]
+ }
+ },
+ '1-2': {'name': 'stages 1-2',
+ 'rows': {'values': [{'values': ['1ab']},
+ {'values': ['2']}]}},
+ '1ab': {'name': 'stage 1ab',
+ 'rows': {'values': [{'values': ['1a','1b']}]
+ }
+ },
+ '1a': {'action': {'args': {'values': {'TaskID': '1242'}},
+ 'name': 'task',
+ 'timeout': 3000},
+ 'name': 'stage 1a'},
+ '1b': {'action': {'args': {'values': {'TaskID': '1243'}},
+ 'name': 'task',
+ 'timeout': 3000},
+ 'name': 'stage 1b'},
+ '2': {'action': {'args': {'values': {'TaskID': '1240'}},
+ 'name': 'task',
+ 'timeout': 3000},
+ 'name': 'stage 2'},
+ '3': {'action': {'args': {'values': {'TaskID': '1241'}},
+ 'name': 'task',
+ 'timeout': 3000},
+ 'name': 'stage 3'},
+ }
+ }
+ }
+ }
+# Create change control from custom stage hierarchy data
+clnt.api.change_control_create_with_custom_stages(data)
+
+# Approve the change control
+approval_note = "Approve CC via cvprac" # notes are optional
+clnt.api.change_control_approve(cc_id, notes=approval_note)
+
+# Start the change control
+start_note = "Starting CC via cvprac" # notes are optional
+clnt.api.change_control_start(cc_id, notes=start_note)
diff --git a/docs/labs/lab06-provisioning/change_control_workflow.py b/docs/labs/lab06-provisioning/change_control_workflow.py
new file mode 100644
index 0000000..c374edf
--- /dev/null
+++ b/docs/labs/lab06-provisioning/change_control_workflow.py
@@ -0,0 +1,27 @@
+# Copyright (c) 2021 Arista Networks, Inc.
+# Use of this source code is governed by the Apache License 2.0
+# that can be found in the COPYING file.
+
+from cvprac.cvp_client import CvpClient
+import ssl
+ssl._create_default_https_context = ssl._create_unverified_context
+import requests.packages.urllib3
+requests.packages.urllib3.disable_warnings()
+from datetime import datetime
+
+# Note: the API token auth method is not yet supported with Change Controls
+clnt = CvpClient()
+clnt.connect(['cvp1'],'username', 'password')
+
+ccid = 'cvprac0904211418'
+name = "cvprac CC test"
+tlist = ['1021','1020','1019','1018']
+
+### Create Change control with the list of tasks
+clnt.api.create_change_control_v3(ccid, name, tlist)
+
+### Approve CC
+clnt.api.approve_change_control(ccid, timestamp=datetime.utcnow().isoformat() + 'Z')
+
+### Execute CC
+clnt.api.execute_change_controls([ccid])
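+
+### Optionally poll the CC status afterwards -- a minimal sketch using the same
+### get_change_control_status() call that mlag_issu.py in this lab relies on
+# status = clnt.api.get_change_control_status(ccid)[0]['status']
+# print(status['state'])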
diff --git a/docs/labs/lab06-provisioning/change_control_workflow_rapi.py b/docs/labs/lab06-provisioning/change_control_workflow_rapi.py
new file mode 100644
index 0000000..299d16a
--- /dev/null
+++ b/docs/labs/lab06-provisioning/change_control_workflow_rapi.py
@@ -0,0 +1,40 @@
+# Copyright (c) 2021 Arista Networks, Inc.
+# Use of this source code is governed by the Apache License 2.0
+# that can be found in the COPYING file.
+#
+# NOTE: The following example uses the new Change Control Resource APIs, supported in CVP 2021.2.0 or newer and in CVaaS.
+# For CVaaS, service-account token-based authentication has to be used.
+
+from cvprac.cvp_client import CvpClient
+import ssl
+import uuid
+from datetime import datetime
+ssl._create_default_https_context = ssl._create_unverified_context
+import requests.packages.urllib3
+requests.packages.urllib3.disable_warnings()
+
+# Create connection to CloudVision
+clnt = CvpClient()
+clnt.connect(['cvp1'],'username', 'password')
+
+# Generate change control id and change control name
+cc_id = str(uuid.uuid4())
+name = f"Change_{datetime.now().strftime('%Y%m%d_%H%M%S')}"
+
+# Select the tasks and create a CC where all tasks will be run in parallel
+tasks = ["1249","1250","1251","1252"]
+clnt.api.change_control_create_for_tasks(cc_id, name, tasks, series=False)
+
+# Approve the change control
+approve_note = "Approving CC via cvprac"
+clnt.api.change_control_approve(cc_id, notes=approve_note)
+
+# # Schedule the change control
+# # Executing scheduled CCs might only work on CVP 2021.3.0 or newer
+# schedule_note = "Scheduling CC via cvprac"
+# schedule_time = "2021-12-23T03:17:00Z"
+# clnt.api.change_control_schedule(cc_id,schedule_time,notes=schedule_note)
+
+# Start the change control
+start_note = "Start the CC via cvprac"
+clnt.api.change_control_start(cc_id, notes=start_note)
\ No newline at end of file
diff --git a/docs/labs/lab06-provisioning/configlets/AVD_leaf1.cfg b/docs/labs/lab06-provisioning/configlets/AVD_leaf1.cfg
new file mode 100644
index 0000000..1339d6f
--- /dev/null
+++ b/docs/labs/lab06-provisioning/configlets/AVD_leaf1.cfg
@@ -0,0 +1,255 @@
+!RANCID-CONTENT-TYPE: arista
+!
+vlan internal order ascending range 1006 1199
+!
+transceiver qsfp default-mode 4x10G
+!
+service routing protocols model multi-agent
+!
+hostname leaf1
+ip name-server vrf default 8.8.8.8
+ip name-server vrf default 192.168.2.1
+dns domain atd.lab
+!
+spanning-tree mode mstp
+no spanning-tree vlan-id 4093-4094
+spanning-tree mst 0 priority 16384
+!
+no enable password
+no aaa root
+!
+vlan 110
+ name Tenant_A_OP_Zone_1
+!
+vlan 160
+ name Tenant_A_VMOTION
+!
+vlan 3009
+ name MLAG_iBGP_Tenant_A_OP_Zone
+ trunk group LEAF_PEER_L3
+!
+vlan 4093
+ name LEAF_PEER_L3
+ trunk group LEAF_PEER_L3
+!
+vlan 4094
+ name MLAG_PEER
+ trunk group MLAG
+!
+vrf instance Tenant_A_OP_Zone
+!
+interface Port-Channel1
+ description MLAG_PEER_leaf2_Po1
+ no shutdown
+ switchport
+ switchport trunk allowed vlan 2-4094
+ switchport mode trunk
+ switchport trunk group LEAF_PEER_L3
+ switchport trunk group MLAG
+!
+interface Port-Channel4
+ description host1_PortChannel
+ no shutdown
+ switchport
+ switchport access vlan 110
+ mlag 4
+!
+interface Ethernet1
+ description MLAG_PEER_leaf2_Ethernet1
+ no shutdown
+ channel-group 1 mode active
+!
+interface Ethernet2
+ description P2P_LINK_TO_SPINE1_Ethernet2
+ no shutdown
+ mtu 1500
+ no switchport
+ ip address 172.30.255.1/31
+!
+interface Ethernet3
+ description P2P_LINK_TO_SPINE2_Ethernet2
+ no shutdown
+ mtu 1500
+ no switchport
+ ip address 172.30.255.3/31
+!
+interface Ethernet4
+ description host1_Eth1
+ no shutdown
+ channel-group 4 mode active
+!
+interface Ethernet5
+ description host1_Eth2
+ no shutdown
+ channel-group 4 mode active
+!
+interface Ethernet6
+ description MLAG_PEER_leaf2_Ethernet6
+ no shutdown
+ channel-group 1 mode active
+!
+interface Loopback0
+ description EVPN_Overlay_Peering
+ no shutdown
+ ip address 192.0.255.3/32
+!
+interface Loopback1
+ description VTEP_VXLAN_Tunnel_Source
+ no shutdown
+ ip address 192.0.254.3/32
+!
+interface Loopback100
+ description Tenant_A_OP_Zone_VTEP_DIAGNOSTICS
+ no shutdown
+ vrf Tenant_A_OP_Zone
+ ip address 10.255.1.3/32
+!
+interface Management1
+ description oob_management
+ no shutdown
+ ip address 192.168.0.12/24
+!
+interface Vlan110
+ description Tenant_A_OP_Zone_1
+ no shutdown
+ vrf Tenant_A_OP_Zone
+ ip address virtual 10.1.10.1/24
+!
+interface Vlan3009
+ description MLAG_PEER_L3_iBGP: vrf Tenant_A_OP_Zone
+ no shutdown
+ mtu 1500
+ vrf Tenant_A_OP_Zone
+ ip address 10.255.251.0/31
+!
+interface Vlan4093
+ description MLAG_PEER_L3_PEERING
+ no shutdown
+ mtu 1500
+ ip address 10.255.251.0/31
+!
+interface Vlan4094
+ description MLAG_PEER
+ no shutdown
+ mtu 1500
+ no autostate
+ ip address 10.255.252.0/31
+!
+interface Vxlan1
+ description leaf1_VTEP
+ vxlan source-interface Loopback1
+ vxlan virtual-router encapsulation mac-address mlag-system-id
+ vxlan udp-port 4789
+ vxlan vlan 110 vni 10110
+ vxlan vlan 160 vni 55160
+ vxlan vrf Tenant_A_OP_Zone vni 10
+!
+ip virtual-router mac-address 00:1c:73:00:dc:01
+!
+ip address virtual source-nat vrf Tenant_A_OP_Zone address 10.255.1.3
+!
+ip routing
+ip routing vrf Tenant_A_OP_Zone
+!
+ip prefix-list PL-LOOPBACKS-EVPN-OVERLAY
+ seq 10 permit 192.0.255.0/24 eq 32
+ seq 20 permit 192.0.254.0/24 eq 32
+!
+mlag configuration
+ domain-id pod1
+ local-interface Vlan4094
+ peer-address 10.255.252.1
+ peer-link Port-Channel1
+ reload-delay mlag 300
+ reload-delay non-mlag 330
+!
+ip route 0.0.0.0/0 192.168.0.1
+!
+route-map RM-CONN-2-BGP permit 10
+ match ip address prefix-list PL-LOOPBACKS-EVPN-OVERLAY
+!
+route-map RM-MLAG-PEER-IN permit 10
+ description Make routes learned over MLAG Peer-link less preferred on spines to ensure optimal routing
+ set origin incomplete
+!
+router bfd
+ multihop interval 1200 min-rx 1200 multiplier 3
+!
+router bgp 65101
+ router-id 192.0.255.3
+ no bgp default ipv4-unicast
+ distance bgp 20 200 200
+ graceful-restart restart-time 300
+ graceful-restart
+ maximum-paths 4 ecmp 4
+ neighbor EVPN-OVERLAY-PEERS peer group
+ neighbor EVPN-OVERLAY-PEERS update-source Loopback0
+ neighbor EVPN-OVERLAY-PEERS bfd
+ neighbor EVPN-OVERLAY-PEERS ebgp-multihop 3
+ neighbor EVPN-OVERLAY-PEERS password 7 q+VNViP5i4rVjW1cxFv2wA==
+ neighbor EVPN-OVERLAY-PEERS send-community
+ neighbor EVPN-OVERLAY-PEERS maximum-routes 0
+ neighbor IPv4-UNDERLAY-PEERS peer group
+ neighbor IPv4-UNDERLAY-PEERS password 7 AQQvKeimxJu+uGQ/yYvv9w==
+ neighbor IPv4-UNDERLAY-PEERS send-community
+ neighbor IPv4-UNDERLAY-PEERS maximum-routes 12000
+ neighbor MLAG-IPv4-UNDERLAY-PEER peer group
+ neighbor MLAG-IPv4-UNDERLAY-PEER remote-as 65101
+ neighbor MLAG-IPv4-UNDERLAY-PEER next-hop-self
+ neighbor MLAG-IPv4-UNDERLAY-PEER description leaf2
+ neighbor MLAG-IPv4-UNDERLAY-PEER password 7 vnEaG8gMeQf3d3cN6PktXQ==
+ neighbor MLAG-IPv4-UNDERLAY-PEER send-community
+ neighbor MLAG-IPv4-UNDERLAY-PEER maximum-routes 12000
+ neighbor MLAG-IPv4-UNDERLAY-PEER route-map RM-MLAG-PEER-IN in
+ neighbor 10.255.251.1 peer group MLAG-IPv4-UNDERLAY-PEER
+ neighbor 10.255.251.1 description leaf2
+ neighbor 172.30.255.0 peer group IPv4-UNDERLAY-PEERS
+ neighbor 172.30.255.0 remote-as 65001
+ neighbor 172.30.255.0 description spine1_Ethernet2
+ neighbor 172.30.255.2 peer group IPv4-UNDERLAY-PEERS
+ neighbor 172.30.255.2 remote-as 65001
+ neighbor 172.30.255.2 description spine2_Ethernet2
+ neighbor 192.0.255.1 peer group EVPN-OVERLAY-PEERS
+ neighbor 192.0.255.1 remote-as 65001
+ neighbor 192.0.255.1 description spine1
+ neighbor 192.0.255.2 peer group EVPN-OVERLAY-PEERS
+ neighbor 192.0.255.2 remote-as 65001
+ neighbor 192.0.255.2 description spine2
+ redistribute connected route-map RM-CONN-2-BGP
+ !
+ vlan-aware-bundle Tenant_A_OP_Zone
+ rd 192.0.255.3:10
+ route-target both 10:10
+ redistribute learned
+ vlan 110
+ !
+ vlan-aware-bundle Tenant_A_VMOTION
+ rd 192.0.255.3:55160
+ route-target both 55160:55160
+ redistribute learned
+ vlan 160
+ !
+ address-family evpn
+ neighbor EVPN-OVERLAY-PEERS activate
+ !
+ address-family ipv4
+ no neighbor EVPN-OVERLAY-PEERS activate
+ neighbor IPv4-UNDERLAY-PEERS activate
+ neighbor MLAG-IPv4-UNDERLAY-PEER activate
+ !
+ vrf Tenant_A_OP_Zone
+ rd 192.0.255.3:10
+ route-target import evpn 10:10
+ route-target export evpn 10:10
+ router-id 192.0.255.3
+ neighbor 10.255.251.1 peer group MLAG-IPv4-UNDERLAY-PEER
+ redistribute connected
+!
+management api http-commands
+ protocol https
+ no shutdown
+ !
+ vrf default
+ no shutdown
+!
+end
diff --git a/docs/labs/lab06-provisioning/configlets/AVD_leaf2.cfg b/docs/labs/lab06-provisioning/configlets/AVD_leaf2.cfg
new file mode 100644
index 0000000..7305516
--- /dev/null
+++ b/docs/labs/lab06-provisioning/configlets/AVD_leaf2.cfg
@@ -0,0 +1,255 @@
+!RANCID-CONTENT-TYPE: arista
+!
+vlan internal order ascending range 1006 1199
+!
+transceiver qsfp default-mode 4x10G
+!
+service routing protocols model multi-agent
+!
+hostname leaf2
+ip name-server vrf default 8.8.8.8
+ip name-server vrf default 192.168.2.1
+dns domain atd.lab
+!
+spanning-tree mode mstp
+no spanning-tree vlan-id 4093-4094
+spanning-tree mst 0 priority 16384
+!
+no enable password
+no aaa root
+!
+vlan 110
+ name Tenant_A_OP_Zone_1
+!
+vlan 160
+ name Tenant_A_VMOTION
+!
+vlan 3009
+ name MLAG_iBGP_Tenant_A_OP_Zone
+ trunk group LEAF_PEER_L3
+!
+vlan 4093
+ name LEAF_PEER_L3
+ trunk group LEAF_PEER_L3
+!
+vlan 4094
+ name MLAG_PEER
+ trunk group MLAG
+!
+vrf instance Tenant_A_OP_Zone
+!
+interface Port-Channel1
+ description MLAG_PEER_leaf1_Po1
+ no shutdown
+ switchport
+ switchport trunk allowed vlan 2-4094
+ switchport mode trunk
+ switchport trunk group LEAF_PEER_L3
+ switchport trunk group MLAG
+!
+interface Port-Channel4
+ description host1_PortChannel
+ no shutdown
+ switchport
+ switchport access vlan 110
+ mlag 4
+!
+interface Ethernet1
+ description MLAG_PEER_leaf1_Ethernet1
+ no shutdown
+ channel-group 1 mode active
+!
+interface Ethernet2
+ description P2P_LINK_TO_SPINE1_Ethernet3
+ no shutdown
+ mtu 1500
+ no switchport
+ ip address 172.30.255.5/31
+!
+interface Ethernet3
+ description P2P_LINK_TO_SPINE2_Ethernet3
+ no shutdown
+ mtu 1500
+ no switchport
+ ip address 172.30.255.7/31
+!
+interface Ethernet4
+ description host1_Eth3
+ no shutdown
+ channel-group 4 mode active
+!
+interface Ethernet5
+ description host1_Eth4
+ no shutdown
+ channel-group 4 mode active
+!
+interface Ethernet6
+ description MLAG_PEER_leaf1_Ethernet6
+ no shutdown
+ channel-group 1 mode active
+!
+interface Loopback0
+ description EVPN_Overlay_Peering
+ no shutdown
+ ip address 192.0.255.4/32
+!
+interface Loopback1
+ description VTEP_VXLAN_Tunnel_Source
+ no shutdown
+ ip address 192.0.254.3/32
+!
+interface Loopback100
+ description Tenant_A_OP_Zone_VTEP_DIAGNOSTICS
+ no shutdown
+ vrf Tenant_A_OP_Zone
+ ip address 10.255.1.4/32
+!
+interface Management1
+ description oob_management
+ no shutdown
+ ip address 192.168.0.13/24
+!
+interface Vlan110
+ description Tenant_A_OP_Zone_1
+ no shutdown
+ vrf Tenant_A_OP_Zone
+ ip address virtual 10.1.10.1/24
+!
+interface Vlan3009
+ description MLAG_PEER_L3_iBGP: vrf Tenant_A_OP_Zone
+ no shutdown
+ mtu 1500
+ vrf Tenant_A_OP_Zone
+ ip address 10.255.251.1/31
+!
+interface Vlan4093
+ description MLAG_PEER_L3_PEERING
+ no shutdown
+ mtu 1500
+ ip address 10.255.251.1/31
+!
+interface Vlan4094
+ description MLAG_PEER
+ no shutdown
+ mtu 1500
+ no autostate
+ ip address 10.255.252.1/31
+!
+interface Vxlan1
+ description leaf2_VTEP
+ vxlan source-interface Loopback1
+ vxlan virtual-router encapsulation mac-address mlag-system-id
+ vxlan udp-port 4789
+ vxlan vlan 110 vni 10110
+ vxlan vlan 160 vni 55160
+ vxlan vrf Tenant_A_OP_Zone vni 10
+!
+ip virtual-router mac-address 00:1c:73:00:dc:01
+!
+ip address virtual source-nat vrf Tenant_A_OP_Zone address 10.255.1.4
+!
+ip routing
+ip routing vrf Tenant_A_OP_Zone
+!
+ip prefix-list PL-LOOPBACKS-EVPN-OVERLAY
+ seq 10 permit 192.0.255.0/24 eq 32
+ seq 20 permit 192.0.254.0/24 eq 32
+!
+mlag configuration
+ domain-id pod1
+ local-interface Vlan4094
+ peer-address 10.255.252.0
+ peer-link Port-Channel1
+ reload-delay mlag 300
+ reload-delay non-mlag 330
+!
+ip route 0.0.0.0/0 192.168.0.1
+!
+route-map RM-CONN-2-BGP permit 10
+ match ip address prefix-list PL-LOOPBACKS-EVPN-OVERLAY
+!
+route-map RM-MLAG-PEER-IN permit 10
+ description Make routes learned over MLAG Peer-link less preferred on spines to ensure optimal routing
+ set origin incomplete
+!
+router bfd
+ multihop interval 1200 min-rx 1200 multiplier 3
+!
+router bgp 65101
+ router-id 192.0.255.4
+ no bgp default ipv4-unicast
+ distance bgp 20 200 200
+ graceful-restart restart-time 300
+ graceful-restart
+ maximum-paths 4 ecmp 4
+ neighbor EVPN-OVERLAY-PEERS peer group
+ neighbor EVPN-OVERLAY-PEERS update-source Loopback0
+ neighbor EVPN-OVERLAY-PEERS bfd
+ neighbor EVPN-OVERLAY-PEERS ebgp-multihop 3
+ neighbor EVPN-OVERLAY-PEERS password 7 q+VNViP5i4rVjW1cxFv2wA==
+ neighbor EVPN-OVERLAY-PEERS send-community
+ neighbor EVPN-OVERLAY-PEERS maximum-routes 0
+ neighbor IPv4-UNDERLAY-PEERS peer group
+ neighbor IPv4-UNDERLAY-PEERS password 7 AQQvKeimxJu+uGQ/yYvv9w==
+ neighbor IPv4-UNDERLAY-PEERS send-community
+ neighbor IPv4-UNDERLAY-PEERS maximum-routes 12000
+ neighbor MLAG-IPv4-UNDERLAY-PEER peer group
+ neighbor MLAG-IPv4-UNDERLAY-PEER remote-as 65101
+ neighbor MLAG-IPv4-UNDERLAY-PEER next-hop-self
+ neighbor MLAG-IPv4-UNDERLAY-PEER description leaf1
+ neighbor MLAG-IPv4-UNDERLAY-PEER password 7 vnEaG8gMeQf3d3cN6PktXQ==
+ neighbor MLAG-IPv4-UNDERLAY-PEER send-community
+ neighbor MLAG-IPv4-UNDERLAY-PEER maximum-routes 12000
+ neighbor MLAG-IPv4-UNDERLAY-PEER route-map RM-MLAG-PEER-IN in
+ neighbor 10.255.251.0 peer group MLAG-IPv4-UNDERLAY-PEER
+ neighbor 10.255.251.0 description leaf1
+ neighbor 172.30.255.4 peer group IPv4-UNDERLAY-PEERS
+ neighbor 172.30.255.4 remote-as 65001
+ neighbor 172.30.255.4 description spine1_Ethernet3
+ neighbor 172.30.255.6 peer group IPv4-UNDERLAY-PEERS
+ neighbor 172.30.255.6 remote-as 65001
+ neighbor 172.30.255.6 description spine2_Ethernet3
+ neighbor 192.0.255.1 peer group EVPN-OVERLAY-PEERS
+ neighbor 192.0.255.1 remote-as 65001
+ neighbor 192.0.255.1 description spine1
+ neighbor 192.0.255.2 peer group EVPN-OVERLAY-PEERS
+ neighbor 192.0.255.2 remote-as 65001
+ neighbor 192.0.255.2 description spine2
+ redistribute connected route-map RM-CONN-2-BGP
+ !
+ vlan-aware-bundle Tenant_A_OP_Zone
+ rd 192.0.255.4:10
+ route-target both 10:10
+ redistribute learned
+ vlan 110
+ !
+ vlan-aware-bundle Tenant_A_VMOTION
+ rd 192.0.255.4:55160
+ route-target both 55160:55160
+ redistribute learned
+ vlan 160
+ !
+ address-family evpn
+ neighbor EVPN-OVERLAY-PEERS activate
+ !
+ address-family ipv4
+ no neighbor EVPN-OVERLAY-PEERS activate
+ neighbor IPv4-UNDERLAY-PEERS activate
+ neighbor MLAG-IPv4-UNDERLAY-PEER activate
+ !
+ vrf Tenant_A_OP_Zone
+ rd 192.0.255.4:10
+ route-target import evpn 10:10
+ route-target export evpn 10:10
+ router-id 192.0.255.4
+ neighbor 10.255.251.0 peer group MLAG-IPv4-UNDERLAY-PEER
+ redistribute connected
+!
+management api http-commands
+ protocol https
+ no shutdown
+ !
+ vrf default
+ no shutdown
+!
+end
diff --git a/docs/labs/lab06-provisioning/configlets/AVD_leaf3.cfg b/docs/labs/lab06-provisioning/configlets/AVD_leaf3.cfg
new file mode 100644
index 0000000..b71d210
--- /dev/null
+++ b/docs/labs/lab06-provisioning/configlets/AVD_leaf3.cfg
@@ -0,0 +1,255 @@
+!RANCID-CONTENT-TYPE: arista
+!
+vlan internal order ascending range 1006 1199
+!
+transceiver qsfp default-mode 4x10G
+!
+service routing protocols model multi-agent
+!
+hostname leaf3
+ip name-server vrf default 8.8.8.8
+ip name-server vrf default 192.168.2.1
+dns domain atd.lab
+!
+spanning-tree mode mstp
+no spanning-tree vlan-id 4093-4094
+spanning-tree mst 0 priority 16384
+!
+no enable password
+no aaa root
+!
+vlan 110
+ name Tenant_A_OP_Zone_1
+!
+vlan 160
+ name Tenant_A_VMOTION
+!
+vlan 3009
+ name MLAG_iBGP_Tenant_A_OP_Zone
+ trunk group LEAF_PEER_L3
+!
+vlan 4093
+ name LEAF_PEER_L3
+ trunk group LEAF_PEER_L3
+!
+vlan 4094
+ name MLAG_PEER
+ trunk group MLAG
+!
+vrf instance Tenant_A_OP_Zone
+!
+interface Port-Channel1
+ description MLAG_PEER_leaf4_Po1
+ no shutdown
+ switchport
+ switchport trunk allowed vlan 2-4094
+ switchport mode trunk
+ switchport trunk group LEAF_PEER_L3
+ switchport trunk group MLAG
+!
+interface Port-Channel4
+ description host2_PortChannel
+ no shutdown
+ switchport
+ switchport access vlan 110
+ mlag 4
+!
+interface Ethernet1
+ description MLAG_PEER_leaf4_Ethernet1
+ no shutdown
+ channel-group 1 mode active
+!
+interface Ethernet2
+ description P2P_LINK_TO_SPINE1_Ethernet4
+ no shutdown
+ mtu 1500
+ no switchport
+ ip address 172.30.255.9/31
+!
+interface Ethernet3
+ description P2P_LINK_TO_SPINE2_Ethernet4
+ no shutdown
+ mtu 1500
+ no switchport
+ ip address 172.30.255.11/31
+!
+interface Ethernet4
+ description host2_Eth1
+ no shutdown
+ channel-group 4 mode active
+!
+interface Ethernet5
+ description host2_Eth2
+ no shutdown
+ channel-group 4 mode active
+!
+interface Ethernet6
+ description MLAG_PEER_leaf4_Ethernet6
+ no shutdown
+ channel-group 1 mode active
+!
+interface Loopback0
+ description EVPN_Overlay_Peering
+ no shutdown
+ ip address 192.0.255.5/32
+!
+interface Loopback1
+ description VTEP_VXLAN_Tunnel_Source
+ no shutdown
+ ip address 192.0.254.5/32
+!
+interface Loopback100
+ description Tenant_A_OP_Zone_VTEP_DIAGNOSTICS
+ no shutdown
+ vrf Tenant_A_OP_Zone
+ ip address 10.255.1.5/32
+!
+interface Management1
+ description oob_management
+ no shutdown
+ ip address 192.168.0.14/24
+!
+interface Vlan110
+ description Tenant_A_OP_Zone_1
+ no shutdown
+ vrf Tenant_A_OP_Zone
+ ip address virtual 10.1.10.1/24
+!
+interface Vlan3009
+ description MLAG_PEER_L3_iBGP: vrf Tenant_A_OP_Zone
+ no shutdown
+ mtu 1500
+ vrf Tenant_A_OP_Zone
+ ip address 10.255.251.4/31
+!
+interface Vlan4093
+ description MLAG_PEER_L3_PEERING
+ no shutdown
+ mtu 1500
+ ip address 10.255.251.4/31
+!
+interface Vlan4094
+ description MLAG_PEER
+ no shutdown
+ mtu 1500
+ no autostate
+ ip address 10.255.252.4/31
+!
+interface Vxlan1
+ description leaf3_VTEP
+ vxlan source-interface Loopback1
+ vxlan virtual-router encapsulation mac-address mlag-system-id
+ vxlan udp-port 4789
+ vxlan vlan 110 vni 10110
+ vxlan vlan 160 vni 55160
+ vxlan vrf Tenant_A_OP_Zone vni 10
+!
+ip virtual-router mac-address 00:1c:73:00:dc:01
+!
+ip address virtual source-nat vrf Tenant_A_OP_Zone address 10.255.1.5
+!
+ip routing
+ip routing vrf Tenant_A_OP_Zone
+!
+ip prefix-list PL-LOOPBACKS-EVPN-OVERLAY
+ seq 10 permit 192.0.255.0/24 eq 32
+ seq 20 permit 192.0.254.0/24 eq 32
+!
+mlag configuration
+ domain-id pod2
+ local-interface Vlan4094
+ peer-address 10.255.252.5
+ peer-link Port-Channel1
+ reload-delay mlag 300
+ reload-delay non-mlag 330
+!
+ip route 0.0.0.0/0 192.168.0.1
+!
+route-map RM-CONN-2-BGP permit 10
+ match ip address prefix-list PL-LOOPBACKS-EVPN-OVERLAY
+!
+route-map RM-MLAG-PEER-IN permit 10
+ description Make routes learned over MLAG Peer-link less preferred on spines to ensure optimal routing
+ set origin incomplete
+!
+router bfd
+ multihop interval 1200 min-rx 1200 multiplier 3
+!
+router bgp 65102
+ router-id 192.0.255.5
+ no bgp default ipv4-unicast
+ distance bgp 20 200 200
+ graceful-restart restart-time 300
+ graceful-restart
+ maximum-paths 4 ecmp 4
+ neighbor EVPN-OVERLAY-PEERS peer group
+ neighbor EVPN-OVERLAY-PEERS update-source Loopback0
+ neighbor EVPN-OVERLAY-PEERS bfd
+ neighbor EVPN-OVERLAY-PEERS ebgp-multihop 3
+ neighbor EVPN-OVERLAY-PEERS password 7 q+VNViP5i4rVjW1cxFv2wA==
+ neighbor EVPN-OVERLAY-PEERS send-community
+ neighbor EVPN-OVERLAY-PEERS maximum-routes 0
+ neighbor IPv4-UNDERLAY-PEERS peer group
+ neighbor IPv4-UNDERLAY-PEERS password 7 AQQvKeimxJu+uGQ/yYvv9w==
+ neighbor IPv4-UNDERLAY-PEERS send-community
+ neighbor IPv4-UNDERLAY-PEERS maximum-routes 12000
+ neighbor MLAG-IPv4-UNDERLAY-PEER peer group
+ neighbor MLAG-IPv4-UNDERLAY-PEER remote-as 65102
+ neighbor MLAG-IPv4-UNDERLAY-PEER next-hop-self
+ neighbor MLAG-IPv4-UNDERLAY-PEER description leaf4
+ neighbor MLAG-IPv4-UNDERLAY-PEER password 7 vnEaG8gMeQf3d3cN6PktXQ==
+ neighbor MLAG-IPv4-UNDERLAY-PEER send-community
+ neighbor MLAG-IPv4-UNDERLAY-PEER maximum-routes 12000
+ neighbor MLAG-IPv4-UNDERLAY-PEER route-map RM-MLAG-PEER-IN in
+ neighbor 10.255.251.5 peer group MLAG-IPv4-UNDERLAY-PEER
+ neighbor 10.255.251.5 description leaf4
+ neighbor 172.30.255.8 peer group IPv4-UNDERLAY-PEERS
+ neighbor 172.30.255.8 remote-as 65001
+ neighbor 172.30.255.8 description spine1_Ethernet4
+ neighbor 172.30.255.10 peer group IPv4-UNDERLAY-PEERS
+ neighbor 172.30.255.10 remote-as 65001
+ neighbor 172.30.255.10 description spine2_Ethernet4
+ neighbor 192.0.255.1 peer group EVPN-OVERLAY-PEERS
+ neighbor 192.0.255.1 remote-as 65001
+ neighbor 192.0.255.1 description spine1
+ neighbor 192.0.255.2 peer group EVPN-OVERLAY-PEERS
+ neighbor 192.0.255.2 remote-as 65001
+ neighbor 192.0.255.2 description spine2
+ redistribute connected route-map RM-CONN-2-BGP
+ !
+ vlan-aware-bundle Tenant_A_OP_Zone
+ rd 192.0.255.5:10
+ route-target both 10:10
+ redistribute learned
+ vlan 110
+ !
+ vlan-aware-bundle Tenant_A_VMOTION
+ rd 192.0.255.5:55160
+ route-target both 55160:55160
+ redistribute learned
+ vlan 160
+ !
+ address-family evpn
+ neighbor EVPN-OVERLAY-PEERS activate
+ !
+ address-family ipv4
+ no neighbor EVPN-OVERLAY-PEERS activate
+ neighbor IPv4-UNDERLAY-PEERS activate
+ neighbor MLAG-IPv4-UNDERLAY-PEER activate
+ !
+ vrf Tenant_A_OP_Zone
+ rd 192.0.255.5:10
+ route-target import evpn 10:10
+ route-target export evpn 10:10
+ router-id 192.0.255.5
+ neighbor 10.255.251.5 peer group MLAG-IPv4-UNDERLAY-PEER
+ redistribute connected
+!
+management api http-commands
+ protocol https
+ no shutdown
+ !
+ vrf default
+ no shutdown
+!
+end
diff --git a/docs/labs/lab06-provisioning/configlets/AVD_leaf4.cfg b/docs/labs/lab06-provisioning/configlets/AVD_leaf4.cfg
new file mode 100644
index 0000000..80e201d
--- /dev/null
+++ b/docs/labs/lab06-provisioning/configlets/AVD_leaf4.cfg
@@ -0,0 +1,255 @@
+!RANCID-CONTENT-TYPE: arista
+!
+vlan internal order ascending range 1006 1199
+!
+transceiver qsfp default-mode 4x10G
+!
+service routing protocols model multi-agent
+!
+hostname leaf4
+ip name-server vrf default 8.8.8.8
+ip name-server vrf default 192.168.2.1
+dns domain atd.lab
+!
+spanning-tree mode mstp
+no spanning-tree vlan-id 4093-4094
+spanning-tree mst 0 priority 16384
+!
+no enable password
+no aaa root
+!
+vlan 110
+ name Tenant_A_OP_Zone_1
+!
+vlan 160
+ name Tenant_A_VMOTION
+!
+vlan 3009
+ name MLAG_iBGP_Tenant_A_OP_Zone
+ trunk group LEAF_PEER_L3
+!
+vlan 4093
+ name LEAF_PEER_L3
+ trunk group LEAF_PEER_L3
+!
+vlan 4094
+ name MLAG_PEER
+ trunk group MLAG
+!
+vrf instance Tenant_A_OP_Zone
+!
+interface Port-Channel1
+ description MLAG_PEER_leaf3_Po1
+ no shutdown
+ switchport
+ switchport trunk allowed vlan 2-4094
+ switchport mode trunk
+ switchport trunk group LEAF_PEER_L3
+ switchport trunk group MLAG
+!
+interface Port-Channel4
+ description host2_PortChannel
+ no shutdown
+ switchport
+ switchport access vlan 110
+ mlag 4
+!
+interface Ethernet1
+ description MLAG_PEER_leaf3_Ethernet1
+ no shutdown
+ channel-group 1 mode active
+!
+interface Ethernet2
+ description P2P_LINK_TO_SPINE1_Ethernet5
+ no shutdown
+ mtu 1500
+ no switchport
+ ip address 172.30.255.13/31
+!
+interface Ethernet3
+ description P2P_LINK_TO_SPINE2_Ethernet5
+ no shutdown
+ mtu 1500
+ no switchport
+ ip address 172.30.255.15/31
+!
+interface Ethernet4
+ description host2_Eth3
+ no shutdown
+ channel-group 4 mode active
+!
+interface Ethernet5
+ description host2_Eth4
+ no shutdown
+ channel-group 4 mode active
+!
+interface Ethernet6
+ description MLAG_PEER_leaf3_Ethernet6
+ no shutdown
+ channel-group 1 mode active
+!
+interface Loopback0
+ description EVPN_Overlay_Peering
+ no shutdown
+ ip address 192.0.255.6/32
+!
+interface Loopback1
+ description VTEP_VXLAN_Tunnel_Source
+ no shutdown
+ ip address 192.0.254.5/32
+!
+interface Loopback100
+ description Tenant_A_OP_Zone_VTEP_DIAGNOSTICS
+ no shutdown
+ vrf Tenant_A_OP_Zone
+ ip address 10.255.1.6/32
+!
+interface Management1
+ description oob_management
+ no shutdown
+ ip address 192.168.0.15/24
+!
+interface Vlan110
+ description Tenant_A_OP_Zone_1
+ no shutdown
+ vrf Tenant_A_OP_Zone
+ ip address virtual 10.1.10.1/24
+!
+interface Vlan3009
+ description MLAG_PEER_L3_iBGP: vrf Tenant_A_OP_Zone
+ no shutdown
+ mtu 1500
+ vrf Tenant_A_OP_Zone
+ ip address 10.255.251.5/31
+!
+interface Vlan4093
+ description MLAG_PEER_L3_PEERING
+ no shutdown
+ mtu 1500
+ ip address 10.255.251.5/31
+!
+interface Vlan4094
+ description MLAG_PEER
+ no shutdown
+ mtu 1500
+ no autostate
+ ip address 10.255.252.5/31
+!
+interface Vxlan1
+ description leaf4_VTEP
+ vxlan source-interface Loopback1
+ vxlan virtual-router encapsulation mac-address mlag-system-id
+ vxlan udp-port 4789
+ vxlan vlan 110 vni 10110
+ vxlan vlan 160 vni 55160
+ vxlan vrf Tenant_A_OP_Zone vni 10
+!
+ip virtual-router mac-address 00:1c:73:00:dc:01
+!
+ip address virtual source-nat vrf Tenant_A_OP_Zone address 10.255.1.6
+!
+ip routing
+ip routing vrf Tenant_A_OP_Zone
+!
+ip prefix-list PL-LOOPBACKS-EVPN-OVERLAY
+ seq 10 permit 192.0.255.0/24 eq 32
+ seq 20 permit 192.0.254.0/24 eq 32
+!
+mlag configuration
+ domain-id pod2
+ local-interface Vlan4094
+ peer-address 10.255.252.4
+ peer-link Port-Channel1
+ reload-delay mlag 300
+ reload-delay non-mlag 330
+!
+ip route 0.0.0.0/0 192.168.0.1
+!
+route-map RM-CONN-2-BGP permit 10
+ match ip address prefix-list PL-LOOPBACKS-EVPN-OVERLAY
+!
+route-map RM-MLAG-PEER-IN permit 10
+ description Make routes learned over MLAG Peer-link less preferred on spines to ensure optimal routing
+ set origin incomplete
+!
+router bfd
+ multihop interval 1200 min-rx 1200 multiplier 3
+!
+router bgp 65102
+ router-id 192.0.255.6
+ no bgp default ipv4-unicast
+ distance bgp 20 200 200
+ graceful-restart restart-time 300
+ graceful-restart
+ maximum-paths 4 ecmp 4
+ neighbor EVPN-OVERLAY-PEERS peer group
+ neighbor EVPN-OVERLAY-PEERS update-source Loopback0
+ neighbor EVPN-OVERLAY-PEERS bfd
+ neighbor EVPN-OVERLAY-PEERS ebgp-multihop 3
+ neighbor EVPN-OVERLAY-PEERS password 7 q+VNViP5i4rVjW1cxFv2wA==
+ neighbor EVPN-OVERLAY-PEERS send-community
+ neighbor EVPN-OVERLAY-PEERS maximum-routes 0
+ neighbor IPv4-UNDERLAY-PEERS peer group
+ neighbor IPv4-UNDERLAY-PEERS password 7 AQQvKeimxJu+uGQ/yYvv9w==
+ neighbor IPv4-UNDERLAY-PEERS send-community
+ neighbor IPv4-UNDERLAY-PEERS maximum-routes 12000
+ neighbor MLAG-IPv4-UNDERLAY-PEER peer group
+ neighbor MLAG-IPv4-UNDERLAY-PEER remote-as 65102
+ neighbor MLAG-IPv4-UNDERLAY-PEER next-hop-self
+ neighbor MLAG-IPv4-UNDERLAY-PEER description leaf3
+ neighbor MLAG-IPv4-UNDERLAY-PEER password 7 vnEaG8gMeQf3d3cN6PktXQ==
+ neighbor MLAG-IPv4-UNDERLAY-PEER send-community
+ neighbor MLAG-IPv4-UNDERLAY-PEER maximum-routes 12000
+ neighbor MLAG-IPv4-UNDERLAY-PEER route-map RM-MLAG-PEER-IN in
+ neighbor 10.255.251.4 peer group MLAG-IPv4-UNDERLAY-PEER
+ neighbor 10.255.251.4 description leaf3
+ neighbor 172.30.255.12 peer group IPv4-UNDERLAY-PEERS
+ neighbor 172.30.255.12 remote-as 65001
+ neighbor 172.30.255.12 description spine1_Ethernet5
+ neighbor 172.30.255.14 peer group IPv4-UNDERLAY-PEERS
+ neighbor 172.30.255.14 remote-as 65001
+ neighbor 172.30.255.14 description spine2_Ethernet5
+ neighbor 192.0.255.1 peer group EVPN-OVERLAY-PEERS
+ neighbor 192.0.255.1 remote-as 65001
+ neighbor 192.0.255.1 description spine1
+ neighbor 192.0.255.2 peer group EVPN-OVERLAY-PEERS
+ neighbor 192.0.255.2 remote-as 65001
+ neighbor 192.0.255.2 description spine2
+ redistribute connected route-map RM-CONN-2-BGP
+ !
+ vlan-aware-bundle Tenant_A_OP_Zone
+ rd 192.0.255.6:10
+ route-target both 10:10
+ redistribute learned
+ vlan 110
+ !
+ vlan-aware-bundle Tenant_A_VMOTION
+ rd 192.0.255.6:55160
+ route-target both 55160:55160
+ redistribute learned
+ vlan 160
+ !
+ address-family evpn
+ neighbor EVPN-OVERLAY-PEERS activate
+ !
+ address-family ipv4
+ no neighbor EVPN-OVERLAY-PEERS activate
+ neighbor IPv4-UNDERLAY-PEERS activate
+ neighbor MLAG-IPv4-UNDERLAY-PEER activate
+ !
+ vrf Tenant_A_OP_Zone
+ rd 192.0.255.6:10
+ route-target import evpn 10:10
+ route-target export evpn 10:10
+ router-id 192.0.255.6
+ neighbor 10.255.251.4 peer group MLAG-IPv4-UNDERLAY-PEER
+ redistribute connected
+!
+management api http-commands
+ protocol https
+ no shutdown
+ !
+ vrf default
+ no shutdown
+!
+end
diff --git a/docs/labs/lab06-provisioning/configlets/AVD_spine1.cfg b/docs/labs/lab06-provisioning/configlets/AVD_spine1.cfg
new file mode 100644
index 0000000..df188d7
--- /dev/null
+++ b/docs/labs/lab06-provisioning/configlets/AVD_spine1.cfg
@@ -0,0 +1,129 @@
+!RANCID-CONTENT-TYPE: arista
+!
+vlan internal order ascending range 1006 1199
+!
+transceiver qsfp default-mode 4x10G
+!
+service routing protocols model multi-agent
+!
+hostname spine1
+ip name-server vrf default 8.8.8.8
+ip name-server vrf default 192.168.2.1
+dns domain atd.lab
+!
+spanning-tree mode none
+!
+no enable password
+no aaa root
+!
+interface Ethernet2
+ description P2P_LINK_TO_LEAF1_Ethernet2
+ no shutdown
+ mtu 1500
+ no switchport
+ ip address 172.30.255.0/31
+!
+interface Ethernet3
+ description P2P_LINK_TO_LEAF2_Ethernet2
+ no shutdown
+ mtu 1500
+ no switchport
+ ip address 172.30.255.4/31
+!
+interface Ethernet4
+ description P2P_LINK_TO_LEAF3_Ethernet2
+ no shutdown
+ mtu 1500
+ no switchport
+ ip address 172.30.255.8/31
+!
+interface Ethernet5
+ description P2P_LINK_TO_LEAF4_Ethernet2
+ no shutdown
+ mtu 1500
+ no switchport
+ ip address 172.30.255.12/31
+!
+interface Loopback0
+ description EVPN_Overlay_Peering
+ no shutdown
+ ip address 192.0.255.1/32
+!
+interface Management1
+ description oob_management
+ no shutdown
+ ip address 192.168.0.10/24
+!
+ip routing
+!
+ip prefix-list PL-LOOPBACKS-EVPN-OVERLAY
+ seq 10 permit 192.0.255.0/24 eq 32
+!
+ip route 0.0.0.0/0 192.168.0.1
+!
+route-map RM-CONN-2-BGP permit 10
+ match ip address prefix-list PL-LOOPBACKS-EVPN-OVERLAY
+!
+router bfd
+ multihop interval 1200 min-rx 1200 multiplier 3
+!
+router bgp 65001
+ router-id 192.0.255.1
+ no bgp default ipv4-unicast
+ distance bgp 20 200 200
+ graceful-restart restart-time 300
+ graceful-restart
+ maximum-paths 4 ecmp 4
+ neighbor EVPN-OVERLAY-PEERS peer group
+ neighbor EVPN-OVERLAY-PEERS next-hop-unchanged
+ neighbor EVPN-OVERLAY-PEERS update-source Loopback0
+ neighbor EVPN-OVERLAY-PEERS bfd
+ neighbor EVPN-OVERLAY-PEERS ebgp-multihop 3
+ neighbor EVPN-OVERLAY-PEERS password 7 q+VNViP5i4rVjW1cxFv2wA==
+ neighbor EVPN-OVERLAY-PEERS send-community
+ neighbor EVPN-OVERLAY-PEERS maximum-routes 0
+ neighbor IPv4-UNDERLAY-PEERS peer group
+ neighbor IPv4-UNDERLAY-PEERS password 7 AQQvKeimxJu+uGQ/yYvv9w==
+ neighbor IPv4-UNDERLAY-PEERS send-community
+ neighbor IPv4-UNDERLAY-PEERS maximum-routes 12000
+ neighbor 172.30.255.1 peer group IPv4-UNDERLAY-PEERS
+ neighbor 172.30.255.1 remote-as 65101
+ neighbor 172.30.255.1 description leaf1_Ethernet2
+ neighbor 172.30.255.5 peer group IPv4-UNDERLAY-PEERS
+ neighbor 172.30.255.5 remote-as 65101
+ neighbor 172.30.255.5 description leaf2_Ethernet2
+ neighbor 172.30.255.9 peer group IPv4-UNDERLAY-PEERS
+ neighbor 172.30.255.9 remote-as 65102
+ neighbor 172.30.255.9 description leaf3_Ethernet2
+ neighbor 172.30.255.13 peer group IPv4-UNDERLAY-PEERS
+ neighbor 172.30.255.13 remote-as 65102
+ neighbor 172.30.255.13 description leaf4_Ethernet2
+ neighbor 192.0.255.3 peer group EVPN-OVERLAY-PEERS
+ neighbor 192.0.255.3 remote-as 65101
+ neighbor 192.0.255.3 description leaf1
+ neighbor 192.0.255.4 peer group EVPN-OVERLAY-PEERS
+ neighbor 192.0.255.4 remote-as 65101
+ neighbor 192.0.255.4 description leaf2
+ neighbor 192.0.255.5 peer group EVPN-OVERLAY-PEERS
+ neighbor 192.0.255.5 remote-as 65102
+ neighbor 192.0.255.5 description leaf3
+ neighbor 192.0.255.6 peer group EVPN-OVERLAY-PEERS
+ neighbor 192.0.255.6 remote-as 65102
+ neighbor 192.0.255.6 description leaf4
+ redistribute connected route-map RM-CONN-2-BGP
+ !
+ address-family evpn
+ neighbor EVPN-OVERLAY-PEERS activate
+ !
+ address-family ipv4
+ no neighbor EVPN-OVERLAY-PEERS activate
+ neighbor IPv4-UNDERLAY-PEERS activate
+!
+management api http-commands
+ protocol https
+ no shutdown
+ !
+ vrf default
+ no shutdown
+!
+end
diff --git a/docs/labs/lab06-provisioning/configlets/AVD_spine2.cfg b/docs/labs/lab06-provisioning/configlets/AVD_spine2.cfg
new file mode 100644
index 0000000..0ad7bd1
--- /dev/null
+++ b/docs/labs/lab06-provisioning/configlets/AVD_spine2.cfg
@@ -0,0 +1,129 @@
+!RANCID-CONTENT-TYPE: arista
+!
+vlan internal order ascending range 1006 1199
+!
+transceiver qsfp default-mode 4x10G
+!
+service routing protocols model multi-agent
+!
+hostname spine2
+ip name-server vrf default 8.8.8.8
+ip name-server vrf default 192.168.2.1
+dns domain atd.lab
+!
+spanning-tree mode none
+!
+no enable password
+no aaa root
+!
+interface Ethernet2
+ description P2P_LINK_TO_LEAF1_Ethernet3
+ no shutdown
+ mtu 1500
+ no switchport
+ ip address 172.30.255.2/31
+!
+interface Ethernet3
+ description P2P_LINK_TO_LEAF2_Ethernet3
+ no shutdown
+ mtu 1500
+ no switchport
+ ip address 172.30.255.6/31
+!
+interface Ethernet4
+ description P2P_LINK_TO_LEAF3_Ethernet3
+ no shutdown
+ mtu 1500
+ no switchport
+ ip address 172.30.255.10/31
+!
+interface Ethernet5
+ description P2P_LINK_TO_LEAF4_Ethernet3
+ no shutdown
+ mtu 1500
+ no switchport
+ ip address 172.30.255.14/31
+!
+interface Loopback0
+ description EVPN_Overlay_Peering
+ no shutdown
+ ip address 192.0.255.2/32
+!
+interface Management1
+ description oob_management
+ no shutdown
+ ip address 192.168.0.11/24
+!
+ip routing
+!
+ip prefix-list PL-LOOPBACKS-EVPN-OVERLAY
+ seq 10 permit 192.0.255.0/24 eq 32
+!
+ip route 0.0.0.0/0 192.168.0.1
+!
+route-map RM-CONN-2-BGP permit 10
+ match ip address prefix-list PL-LOOPBACKS-EVPN-OVERLAY
+!
+router bfd
+ multihop interval 1200 min-rx 1200 multiplier 3
+!
+router bgp 65001
+ router-id 192.0.255.2
+ no bgp default ipv4-unicast
+ distance bgp 20 200 200
+ graceful-restart restart-time 300
+ graceful-restart
+ maximum-paths 4 ecmp 4
+ neighbor EVPN-OVERLAY-PEERS peer group
+ neighbor EVPN-OVERLAY-PEERS next-hop-unchanged
+ neighbor EVPN-OVERLAY-PEERS update-source Loopback0
+ neighbor EVPN-OVERLAY-PEERS bfd
+ neighbor EVPN-OVERLAY-PEERS ebgp-multihop 3
+ neighbor EVPN-OVERLAY-PEERS password 7 q+VNViP5i4rVjW1cxFv2wA==
+ neighbor EVPN-OVERLAY-PEERS send-community
+ neighbor EVPN-OVERLAY-PEERS maximum-routes 0
+ neighbor IPv4-UNDERLAY-PEERS peer group
+ neighbor IPv4-UNDERLAY-PEERS password 7 AQQvKeimxJu+uGQ/yYvv9w==
+ neighbor IPv4-UNDERLAY-PEERS send-community
+ neighbor IPv4-UNDERLAY-PEERS maximum-routes 12000
+ neighbor 172.30.255.3 peer group IPv4-UNDERLAY-PEERS
+ neighbor 172.30.255.3 remote-as 65101
+ neighbor 172.30.255.3 description leaf1_Ethernet3
+ neighbor 172.30.255.7 peer group IPv4-UNDERLAY-PEERS
+ neighbor 172.30.255.7 remote-as 65101
+ neighbor 172.30.255.7 description leaf2_Ethernet3
+ neighbor 172.30.255.11 peer group IPv4-UNDERLAY-PEERS
+ neighbor 172.30.255.11 remote-as 65102
+ neighbor 172.30.255.11 description leaf3_Ethernet3
+ neighbor 172.30.255.15 peer group IPv4-UNDERLAY-PEERS
+ neighbor 172.30.255.15 remote-as 65102
+ neighbor 172.30.255.15 description leaf4_Ethernet3
+ neighbor 192.0.255.3 peer group EVPN-OVERLAY-PEERS
+ neighbor 192.0.255.3 remote-as 65101
+ neighbor 192.0.255.3 description leaf1
+ neighbor 192.0.255.4 peer group EVPN-OVERLAY-PEERS
+ neighbor 192.0.255.4 remote-as 65101
+ neighbor 192.0.255.4 description leaf2
+ neighbor 192.0.255.5 peer group EVPN-OVERLAY-PEERS
+ neighbor 192.0.255.5 remote-as 65102
+ neighbor 192.0.255.5 description leaf3
+ neighbor 192.0.255.6 peer group EVPN-OVERLAY-PEERS
+ neighbor 192.0.255.6 remote-as 65102
+ neighbor 192.0.255.6 description leaf4
+ redistribute connected route-map RM-CONN-2-BGP
+ !
+ address-family evpn
+ neighbor EVPN-OVERLAY-PEERS activate
+ !
+ address-family ipv4
+ no neighbor EVPN-OVERLAY-PEERS activate
+ neighbor IPv4-UNDERLAY-PEERS activate
+!
+management api http-commands
+ protocol https
+ no shutdown
+ !
+ vrf default
+ no shutdown
+!
+end
diff --git a/docs/labs/lab06-provisioning/gen_builder.py b/docs/labs/lab06-provisioning/gen_builder.py
new file mode 100644
index 0000000..8cd389d
--- /dev/null
+++ b/docs/labs/lab06-provisioning/gen_builder.py
@@ -0,0 +1,63 @@
+# Copyright (c) 2020 Arista Networks, Inc.
+# Use of this source code is governed by the Apache License 2.0
+# that can be found in the COPYING file.
+
+from cvprac.cvp_client import CvpClient
+import ssl
+ssl._create_default_https_context = ssl._create_unverified_context
+import requests.packages.urllib3
+requests.packages.urllib3.disable_warnings()
+
+# Create connection to CloudVision
+clnt = CvpClient()
+clnt.connect(['cvp1'],'username', 'password')
+
+container_id = clnt.api.get_container_by_name("TP_LEAFS")['key']
+builder_name = 'SYS_TelemetryBuilderV3'
+configletBuilderID = clnt.api.get_configlet_by_name(builder_name)['key']
+
+payload = {"previewValues":[{
+ "fieldId":"vrf",
+ "value":"red"}],
+ "configletBuilderId":configletBuilderID,
+ "netElementIds":[],
+ "pageType":"container",
+ "containerId":container_id,
+ "containerToId":"",
+ "mode":"assign"}
+
+preview = clnt.post('/configlet/configletBuilderPreview.do', data=payload)
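+# The preview response carries the configlets generated by the builder: each entry in
+# preview['data'] holds the generated configlet's name and key; these are collected below.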
+
+generated_names_list = []
+generated_keys_list = []
+
+for i in preview['data']:
+ generated_names_list.append(i['configlet']['name'])
+ generated_keys_list.append(i['configlet']['key'])
+
+clnt.get("/configlet/searchConfiglets.do?objectId={}&objectType=container&type=ignoreDraft&queryparam={}&startIndex=0&endIndex=22&sortByColumn=&sortOrder=".format(container_id, builder_name.lower()))
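+# This search call appears to mirror the UI workflow prior to assigning the generated
+# configlets; its response is not used further in this script.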
+
+tempData = {"data":[{
+ "info":"Configlet Assign: to container TP_LEAFS",
+ "infoPreview":"<b>Configlet Assign:</b> to container TP_LEAFS",
+ "action":"associate",
+ "nodeType":"configlet",
+ "nodeId":"",
+ "toId":container_id,
+ "fromId":"","nodeName":"","fromName":"",
+ "toName":"TP_LEAFS",
+ "toIdType":"container",
+ "configletList":generated_keys_list,
+ "configletNamesList":generated_names_list,
+ "ignoreConfigletList":[],
+ "ignoreConfigletNamesList":[],
+ "configletBuilderList":[configletBuilderID],
+ "configletBuilderNamesList":[builder_name],
+ "ignoreConfigletBuilderList":[],
+ "ignoreConfigletBuilderNamesList":[]
+ }
+ ]
+ }
+
+clnt.api._add_temp_action(tempData)
+clnt.api._save_topology_v2([])
diff --git a/docs/labs/lab06-provisioning/mlag_issu.py b/docs/labs/lab06-provisioning/mlag_issu.py
new file mode 100644
index 0000000..307d418
--- /dev/null
+++ b/docs/labs/lab06-provisioning/mlag_issu.py
@@ -0,0 +1,220 @@
+#!/usr/bin/env python3
+#
+# python3 mlag_issu.py <upgrade inventory file> <MLAG peer to upgrade: 'peer1' or 'peer2'>
+#
+# # Example of upgrade inventory file (YAML)
+# cvp_hosts:
+# - 192.168.0.191
+# - 192.168.0.192
+# - 192.168.0.193
+# cvp_username: cvpadmin
+# target_eos_version: 4.25.4M
+# target_terminattr_version: 1.13.6
+# mlag_couples:
+# - peer1: leaf101-1
+# peer2: leaf101-2
+# - peer1: leaf102-1
+# peer2: leaf102-2
+#
+# Note: upgrades are performed in parallel
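+#
+# Example invocation (the inventory file name is illustrative):
+#   python3 mlag_issu.py upgrade_inventory.yaml peer1
+#
+# The generated change control has three stages: MLAG health checks before the upgrade,
+# the upgrade tasks themselves (run in parallel), and MLAG health checks after the upgrade.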
+
+import sys
+import time
+import string
+import random
+from getpass import getpass
+import requests
+from requests.packages.urllib3.exceptions import InsecureRequestWarning
+from datetime import datetime
+from cvprac.cvp_client import CvpClient
+from cvprac.cvp_client_errors import CvpLoginError, CvpApiError
+from pprint import pprint
+from operator import itemgetter
+import yaml
+
+class CvpDeviceUpgrader(object):
+ def __init__(self, hosts, username, password):
+ requests.packages.urllib3.disable_warnings(InsecureRequestWarning)
+ self.cvp_hosts = hosts
+ self.cvp_user = username
+ self.cvp_password = password
+ self.session = self._open_cvp_session()
+
+ def _open_cvp_session(self):
+ try:
+ client = CvpClient()
+ client.connect(
+ nodes=self.cvp_hosts,
+ username=self.cvp_user,
+ password=self.cvp_password,
+ request_timeout=300,
+ connect_timeout=30
+ )
+ return(client)
+ except CvpLoginError as e:
+ print(f"Cannot connect to CVP API: {e}")
+ exit()
+
+ def create_mlag_issu_change_control(self, taskIDs, deviceIDs):
+ cc_id = f"CC_{datetime.now().strftime('%Y%m%d_%H%M%S')}"
+ pre_upgrade_stage = {'stage': [{
+ 'id': f"preU_{cc_id}",
+ 'name': 'pre_upgrade',
+ 'stage_row':[{'stage': [{
+ 'id': ''.join(random.choice(string.ascii_letters + string.digits) for _ in range(9)),
+ 'action': {
+ 'name': 'mlaghealthcheck',
+ 'timeout': 0,
+ 'args': {
+ 'DeviceID': device_id
+ }
+ }
+ } for device_id in deviceIDs]}]
+ }]}
+ upgrade_stage = {'stage': [{
+ 'id': f"U_{cc_id}",
+ 'name': 'upgrade',
+ 'stage_row': [{'stage': [{
+ 'id': task_id,
+ 'action': {
+ 'name': 'task',
+ 'args': {
+ 'TaskID': task_id
+ }
+ }
+ } for task_id in taskIDs]}]
+ }]}
+ post_upgrade_stage = {'stage': [{
+ 'id': f"postU_{cc_id}",
+ 'name': 'post_upgrade',
+ 'stage_row': [{'stage': [{
+ 'id': ''.join(random.choice(string.ascii_letters + string.digits) for _ in range(9)),
+ 'action': {
+ 'name': 'mlaghealthcheck',
+ 'timeout': 0,
+ 'args': {
+ 'DeviceID': device_id
+ }
+ }
+ } for device_id in deviceIDs]}]
+ }]}
+ cc_data = {'config': {
+ 'id': cc_id,
+ 'name': f"Change Control {cc_id}",
+ 'root_stage': {
+ 'id': 'root',
+ 'name': f"Change Control {cc_id} root",
+ 'stage_row': [pre_upgrade_stage, upgrade_stage, post_upgrade_stage],
+ }
+ }}
+ try:
+ res = self.session.post('/api/v3/services/ccapi.ChangeControl/Update',
+ data=cc_data,
+ timeout=self.session.api.request_timeout
+ )
+ except Exception as e:
+ print(str(e))
+ return(None)
+ print(f"Change control {res[0]['id']} created at {res[0]['update_timestamp']}")
+ return(res[0]['id'])
+
+ def get_mlag_issu_change_control_logs(self, ccID, startTime):
+ end_time = int(time.time() * 1000)
+ cc_logs_data = {'category': 'ChangeControl',
+ 'objectKey': ccID,
+ 'dataSize': 15000,
+ 'startTime': startTime,
+ 'endTime': end_time
+ }
+ logs = self.session.post('/cvpservice/audit/getLogs.do',
+ data=cc_logs_data,
+ timeout=self.session.api.request_timeout
+ )
+ for log in sorted(logs['data'], key=itemgetter('dateTimeInLongFormat')):
+ if log['subObjectName'] and 'Command(s)' not in log['activity']:
+ log_date = datetime.fromtimestamp(log['dateTimeInLongFormat']/1000)
+ print(f"{log_date} {log['subObjectName']}: {log['activity']}")
+ return(end_time + 1)
+
+ def run_mlag_issu_change_control(self, ccID):
+ print(f"Automatic approval of change control {ccID}")
+ self.session.api.approve_change_control(ccID, datetime.utcnow().isoformat() + 'Z')
+ time.sleep(2)
+ print(f"Starting the execution of change control {ccID}")
+ start_time = int(time.time() * 1000)
+ self.session.api.execute_change_controls([ccID])
+ time.sleep(2)
+ cc_status = self.session.api.get_change_control_status(ccID)[0]['status']
+ start_time = self.get_mlag_issu_change_control_logs(ccID, start_time)
+ while cc_status['state'] == 'Running':
+ time.sleep(30)
+ cc_status = self.session.api.get_change_control_status(ccID)[0]['status']
+ start_time = self.get_mlag_issu_change_control_logs(ccID, start_time)
+ print(f"Change control {ccID} final status: {cc_status['state']}")
+ if cc_status['error']:
+ print(f"Change control {ccID} had the following errors: {cc_status['error']}")
+ else:
+ print(f"Change control {ccID} completed without errors")
+
+def main():
+ if len(sys.argv) != 3:
+ print(f"Usage: python3 {sys.argv[0]} <input file path> <MLAG peer to upgrade: peer1/peer2>")
+ exit()
+ try:
+ with open(sys.argv[1], 'r') as yf:
+ params = yaml.safe_load(yf)
+ except Exception as e:
+ print(e)
+ exit()
+ cvp_password = getpass(prompt=f"CVP password for user {params['cvp_username']}: ")
+ cvpdu = CvpDeviceUpgrader(
+ hosts=params['cvp_hosts'],
+ username=params['cvp_username'],
+ password=cvp_password
+ )
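+ # Pick an existing image bundle whose images include both the target EOS
+ # version and the target TerminAttr version from the input file.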
+ image_bundle = None
+ for bundle in cvpdu.session.api.get_image_bundles()['data']:
+ eos_match = False
+ terminattr_match = False
+ for img in bundle['imageIds']:
+ if params['target_eos_version'] in img:
+ eos_match = True
+ elif params['target_terminattr_version'] in img:
+ terminattr_match = True
+ if eos_match and terminattr_match:
+ image_bundle = bundle
+ break
+ if image_bundle is None:
+ print(f"Cannot find an image bundle with EOS {params['target_eos_version']} and TerminAttr {params['target_terminattr_version']}")
+ exit()
+ hostnames = [couple[sys.argv[2]] for couple in params['mlag_couples']]
+ devices_to_upgrade = list()
+ inventory = cvpdu.session.api.get_inventory()
+ for hostname in hostnames:
+ provisioned = False
+ for dev in inventory:
+ if dev['hostname'] == hostname:
+ provisioned = True
+ devices_to_upgrade.append(dev)
+ break
+ if not provisioned:
+ print(f"Device with hostname {hostname} is not provisioned in CVP")
+ if not devices_to_upgrade:
+ print('None of the specified devices are provisioned in CVP')
+ exit()
+ print(f"Devices to upgrade: {', '.join([dev['hostname'] for dev in devices_to_upgrade])}")
+ task_ids = list()
+ for device in devices_to_upgrade:
+ response = cvpdu.session.api.apply_image_to_device(image_bundle, device)['data']
+ if response['status'] == 'success':
+ task_ids.extend(response['taskIds'])
+ device_ids = [device['serialNumber'] for device in devices_to_upgrade]
+ cc_id = cvpdu.create_mlag_issu_change_control(task_ids, device_ids)
+ if cc_id is None:
+ print('Failed to create the MLAG ISSU change control')
+ exit()
+ time.sleep(2)
+ cvpdu.run_mlag_issu_change_control(cc_id)
+
+if __name__ == '__main__':
+ main()
diff --git a/docs/labs/lab06-provisioning/move_device.py b/docs/labs/lab06-provisioning/move_device.py
new file mode 100644
index 0000000..5257f79
--- /dev/null
+++ b/docs/labs/lab06-provisioning/move_device.py
@@ -0,0 +1,24 @@
+# Copyright (c) 2021 Arista Networks, Inc.
+# Use of this source code is governed by the Apache License 2.0
+# that can be found in the COPYING file.
+
+from cvprac.cvp_client import CvpClient
+import ssl
+ssl._create_default_https_context = ssl._create_unverified_context
+import requests.packages.urllib3
+requests.packages.urllib3.disable_warnings()
+
+# Create connection to CloudVision
+with open("token.tok") as f:
+ token = f.read().strip('\n')
+
+clnt = CvpClient()
+clnt.connect(nodes=['cvp1'], username='', password='', api_token=token)
+
+container = clnt.api.get_container_by_name('TP_LEAFS') # container object
+
+app_name = "my app" # can be any string
+
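+# Device to move: 'key' is the provisioned device's system MAC address and
+# 'fqdn' its fully qualified domain name (the values below are lab examples).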
+device = {"key":"00:1c:73:c5:4c:87", "fqdn":"co633.ire.aristanetworks.com"}
+
+clnt.api.move_device_to_container(app_name, device, container)
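+
+# Alternatively, the device dictionary could be retrieved from the CloudVision
+# inventory instead of being hard-coded, e.g. (hostname is a placeholder):
+# device = clnt.api.get_device_by_name('co633.ire.aristanetworks.com')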
diff --git a/docs/labs/lab06-provisioning/vc_task_retrigger.py b/docs/labs/lab06-provisioning/vc_task_retrigger.py
new file mode 100644
index 0000000..b5586ee
--- /dev/null
+++ b/docs/labs/lab06-provisioning/vc_task_retrigger.py
@@ -0,0 +1,115 @@
+# Copyright (c) 2021 Arista Networks, Inc.
+# Use of this source code is governed by the Apache License 2.0
+# that can be found in the COPYING file.
+
+# Example of how to re-trigger task creation when a config push task was
+# previously cancelled and the device's configuration is still out of sync
+import argparse
+import ssl
+import sys
+from pkg_resources import parse_version
+from getpass import getpass
+from cvprac.cvp_client import CvpClient
+import requests.packages.urllib3
+requests.packages.urllib3.disable_warnings()
+
+
+if ((sys.version_info.major == 3) or
+ (sys.version_info.major == 2 and sys.version_info.minor == 7 and
+ sys.version_info.micro >= 5)):
+ ssl._create_default_https_context = ssl._create_unverified_context
+
+
+def main():
+
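+ # complianceCode values that this script treats as requiring a new config
+ # push task; devices whose code matches one of these keys have their
+ # current configlets re-applied below.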
+ compliance = {"0001": "Config is out of sync",
+ "0003": "Config & image out of sync",
+ "0004": "Config, Image and Device time are in sync",
+ "0005": "Device is not reachable",
+ "0008": "Config, Image and Extensions are out of sync",
+ "0009": "Config and Extensions are out of sync",
+ "0012": "Config, Image, Extension and Device time are out of sync",
+ "0013": "Config, Image and Device time are out of sync",
+ "0014": "Config, Extensions and Device time are out of sync",
+ "0016": "Config and Device time are out of sync"
+ }
+ # Create connection to CloudVision
+ clnt = CvpClient()
+
+ parser = argparse.ArgumentParser(
+ description='Script to recreate a task if a previous config push was cancelled')
+ parser.add_argument('-u', '--username', default='username')
+ parser.add_argument('-p', '--password', default=None)
+ parser.add_argument('-c', '--cvpserver', action='append')
+ parser.add_argument('-f', '--filter', action='append', default=None)
+ args = parser.parse_args()
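+ # Example invocation (server and filter values are placeholders):
+ #   python3 vc_task_retrigger.py -u cvpadmin -c cvp1 -f leaf1,leaf2 -f spine1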
+
+ if args.password is None:
+ args.password = getpass()
+
+ for cvpserver in args.cvpserver:
+ print("Connecting to %s" % cvpserver)
+ try:
+ clnt.connect(nodes=[cvpserver], username=args.username, password=args.password)
+ except Exception as e:
+ print("Unable to connect to CVP: %s" % str(e))
+
+ # Get the current CVP version
+ cvp_release = clnt.api.get_cvp_info()['version']
+ if parse_version(cvp_release) < parse_version('2020.3.0'):
+ # For older CVP, we manually trigger a compliance check
+ try:
+ clnt.api.check_compliance('root', 'container')
+ except Exception:
+ # Intentionally ignored: a compliance check applied to a container cannot
+ # fully succeed, because the complianceIndication key does not exist at
+ # the container level.
+ pass
+ else:
+ # with continuous compliance checks, triggering the check is no longer required
+ pass
+
+ device_filters = []
+ if args.filter is not None:
+ for entry in args.filter:
+ device_filters.extend(entry.split(','))
+
+ # Get inventory
+ print("Collecting inventory...")
+ devices = clnt.api.get_inventory()
+ print("%d devices in inventory" % len(devices) )
+
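+ # Only devices that are registered and already provisioned (i.e. not in the
+ # undefined container) are considered for task re-creation.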
+ for switch in devices:
+ if (switch['status'] == 'Registered' and
+ switch['parentContainerId'] != 'undefined_container'):
+
+ if device_filters:
+ # only update tasks for devices whose hostname matches
+ # one of the provided filter terms
+
+ if any(term in switch['hostname'] for term in device_filters):
+ print("Checking device: %s" % switch['hostname'])
+ # generate configlet list
+ cl = clnt.api.get_configlets_by_device_id(switch['systemMacAddress'])
+ # generate a task if config is out of sync
+ if switch['complianceCode'] in compliance.keys():
+ print(clnt.api.apply_configlets_to_device("", switch, cl))
+ else:
+ print("%s is compliant, nothing to do" % switch['hostname'])
+ else:
+ print("Skipping %s due to filter" % switch['hostname'])
+ else:
+ print("Checking device: %s" % switch['hostname'])
+ cl = clnt.api.get_configlets_by_device_id(switch['systemMacAddress'])
+ # generate a task if config is out of sync
+ if switch['complianceCode'] in compliance.keys():
+ print(clnt.api.apply_configlets_to_device("", switch, cl))
+
+ else:
+ print("Skipping %s, device is unregistered for provisioning" % switch['hostname'])
+
+ return 0
+
+
+if __name__ == "__main__":
+ main()