Merge pull request #79 from aristanetworks/avd-4.0-site
Updating workshop for 4.0 models
mthiel117 committed Jun 23, 2023
2 parents ff08cce + 163aa66 commit 7f2a165
Showing 9 changed files with 188 additions and 194 deletions.
67 changes: 67 additions & 0 deletions workshops/assets/examples/avd/global_dc_vars.yml
@@ -0,0 +1,67 @@
---

# Credentials for CVP and EOS Switches
ansible_user: arista
ansible_password: "{{ lookup('env', 'LABPASSPHRASE') }}"
ansible_network_os: arista.eos.eos
# Configure privilege escalation
ansible_become: true
ansible_become_method: enable
# HTTPAPI configuration
ansible_connection: httpapi
ansible_httpapi_port: 443
ansible_httpapi_use_ssl: true
ansible_httpapi_validate_certs: false
ansible_python_interpreter: $(which python3)
avd_data_conversion_mode: error
avd_data_validation_mode: error

# Local Users
local_users:
  - name: arista
    privilege: 15
    role: network-admin
    sha512_password: "{{ ansible_password | password_hash(salt='workshop') }}"

# AAA
aaa_authorization:
  exec:
    default: local

# OOB Management network default gateway.
mgmt_gateway: 192.168.0.1
mgmt_interface: Management0
mgmt_interface_vrf: default

# NTP Servers IP or DNS name, first NTP server will be preferred, and sourced from Management VRF
ntp:
  servers:
    - name: 192.168.0.1
      iburst: true
      local_interface: Management0

# Domain/DNS
dns_domain: atd.lab

# TerminAttr
daemon_terminattr:
  # Address of the gRPC server on CloudVision
  # TCP 9910 is used for on-prem CloudVision
  # TCP 443 is used for CloudVision as a Service (CVaaS)
  cvaddrs: # For single cluster
    - 192.168.0.5:9910
  # Authentication scheme used to connect to CloudVision
  cvauth:
    method: token
    token_file: "/tmp/token"
  # Exclude paths from Sysdb on the ingest side
  ingestexclude: /Sysdb/cell/1/agent,/Sysdb/cell/2/agent
  # Exclude paths from the shared memory table
  smashexcludes: ale,flexCounter,hardware,kni,pulse,strata

# Point to Point Links MTU Override for Lab
p2p_uplinks_mtu: 1500

# CVP node variables
cv_collection: v3
execute_tasks: true
2 changes: 1 addition & 1 deletion workshops/assets/examples/avd/playbooks/deploy.yml
@@ -12,4 +12,4 @@

    - name: Deploy Configuration to Device
      import_role:
-        name: arista.avd.eos_config_deploy_eapi
+        name: arista.avd.eos_config_deploy_eapi
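The visible change above appears to be whitespace-only; the role name itself is unchanged. For orientation, a deploy playbook of this shape typically wraps the role import in a single play. The play name and host group below are illustrative assumptions, not the workshop's exact file:

``` yaml
---
- name: Deploy configurations to the fabric   # assumed play name
  hosts: SITE1_FABRIC                          # assumed inventory group
  gather_facts: false
  tasks:
    - name: Deploy Configuration to Device
      import_role:
        name: arista.avd.eos_config_deploy_eapi
```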
22 changes: 10 additions & 12 deletions workshops/assets/examples/avd/site_1/group_vars/SITE1_FABRIC.yml
@@ -17,16 +17,16 @@ l3spine:
    virtual_router_mac_address: 00:1c:73:00:dc:01
    mlag_interfaces: [ Ethernet1, Ethernet6 ]
  node_groups:
-    SPINES:
+    - group: SPINES
      nodes:
-        s1-spine1:
+        - name: s1-spine1
          id: 1
          mgmt_ip: 192.168.0.10/24
-        s1-spine2:
+        - name: s1-spine2
          id: 2
          mgmt_ip: 192.168.0.11/24

-# IDF - Leaf Switches
+# Leaf Switches
leaf:
  defaults:
    platform: cEOS
@@ -37,29 +37,27 @@ leaf:
    uplink_interfaces: [ Ethernet2, Ethernet3 ]
    mlag_interfaces: [ Ethernet1, Ethernet6 ]
  node_groups:
-    RACK1:
-      mlag: true
+    - group: RACK1
      filter:
        tags: [ "Web" ]
      nodes:
-        s1-leaf1:
+        - name: s1-leaf1
          id: 3
          mgmt_ip: 192.168.0.12/24
          uplink_switch_interfaces: [ Ethernet2, Ethernet2 ]
-        s1-leaf2:
+        - name: s1-leaf2
          id: 4
          mgmt_ip: 192.168.0.13/24
          uplink_switch_interfaces: [ Ethernet3, Ethernet3 ]
-    RACK2:
-      mlag: true
+    - group: RACK2
      filter:
        tags: [ "App" ]
      nodes:
-        s1-leaf3:
+        - name: s1-leaf3
          id: 5
          mgmt_ip: 192.168.0.14/24
          uplink_switch_interfaces: [ Ethernet4, Ethernet4 ]
-        s1-leaf4:
+        - name: s1-leaf4
          id: 6
          mgmt_ip: 192.168.0.15/24
          uplink_switch_interfaces: [ Ethernet5, Ethernet5 ]
@@ -1,10 +1,11 @@
---
port_profiles:

-  PP-VLAN10:
+  - profile: PP-VLAN10
    mode: "access"
    vlans: "10"
    spanning_tree_portfast: edge
-  PP-VLAN20:
+  - profile: PP-VLAN20
    mode: "access"
    vlans: "20"
    spanning_tree_portfast: edge
@@ -19,7 +20,7 @@ servers:
  # Site1 RACK1 Endpoints
  # --------------------------------------------------------#

-  s1-host1: # Server name
+  - name: s1-host1 # Server name
    rack: RACK1 # Informational RACK (optional)
    adapters:
      - endpoint_ports: [ eth1, eth2 ] # Server port to connect (optional)
@@ -33,7 +34,7 @@
  # Site1 RACK2 Endpoints
  # --------------------------------------------------------#

-  s1-host2: # Server name
+  - name: s1-host2 # Server name
    rack: RACK2 # Informational RACK (optional)
    adapters:
      - endpoint_ports: [ eth1, eth2 ] # Server port to connect (optional)
@@ -1,28 +1,28 @@
---
tenants:
-  MY_FABRIC:
+  - name: MY_FABRIC
    vrfs:
-      default:
+      - name: default
        svis:
-          10:
+          - id: 10
            name: 'Ten'
            tags: [ "Web" ]
            enabled: true
            ip_virtual_router_addresses:
              - 10.10.10.1
            nodes:
-              s1-spine1:
+              - node: s1-spine1
                ip_address: 10.10.10.2/24
-              s1-spine2:
+              - node: s1-spine2
                ip_address: 10.10.10.3/24
-          20:
+          - id: 20
            name: 'Twenty'
            tags: [ "App" ]
            enabled: true
            ip_virtual_router_addresses:
              - 10.20.20.1
            nodes:
-              s1-spine1:
+              - node: s1-spine1
                ip_address: 10.20.20.2/24
-              s1-spine2:
+              - node: s1-spine2
                ip_address: 10.20.20.3/24
46 changes: 23 additions & 23 deletions workshops/avd-lab-guide.md
@@ -48,7 +48,7 @@ git config --global user.email "[email protected]"
AVD has been pre-installed in your lab environment. However, it is on an older version. The following steps will update AVD and modules to the latest versions.

``` bash
-ansible-galaxy collection install arista.avd arista.cvp --force
+ansible-galaxy collection install -r requirements.yml
export ARISTA_AVD_DIR=$(ansible-galaxy collection list arista.avd --format yaml | head -1 | cut -d: -f1)
pip3 config set global.disable-pip-version-check true
pip3 install -r ${ARISTA_AVD_DIR}/arista/avd/requirements.txt
@@ -227,7 +227,7 @@ diff checkpoint:< filename > running-config

Let's configure port-channels to our hosts (`s1-host1` and `s1-host2`).

-Open `SITE1_FABRIC_PORTS.yml` and uncomment lines 16-44, then run the build & deploy process again.
+Open `SITE1_FABRIC_PORTS.yml` and uncomment lines 17-45, then run the build & deploy process again.
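For orientation, a server entry with an MLAG port-channel in the AVD 4.x connected-endpoints model has roughly the shape sketched below. The interface numbers, switches, and profile here are illustrative assumptions; the authoritative values are the commented lines in `SITE1_FABRIC_PORTS.yml` itself.

``` yaml
servers:
  - name: s1-host1                              # endpoint name
    rack: RACK1                                 # informational only
    adapters:
      - endpoint_ports: [ eth1, eth2 ]          # host NICs (assumed names)
        switch_ports: [ Ethernet4, Ethernet4 ]  # leaf ports (assumed)
        switches: [ s1-leaf1, s1-leaf2 ]        # MLAG leaf pair (assumed)
        profile: PP-VLAN10                      # port profile defined earlier
        port_channel:
          mode: active                          # LACP port-channel to the host
```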

``` bash
make build-site-1
@@ -269,7 +269,7 @@ At this point, you should be able to ping between hosts within a site but not be

## **Connect Sites to WAN IP Network**

-The WAN IP Network is defined by the `core_interfaces` data model. Full data model documentation is located [here](https://avd.sh/en/stable/roles/eos_designs/doc/core-interfaces-BETA.html).
+The WAN IP Network is defined by the `core_interfaces` data model. Full data model documentation is located [here](https://avd.sh/en/v4.1.0/roles/eos_designs/docs/input-variables.html#core-interfaces-settings).

The data model defines P2P links (`/31s`) on the spines with a stanza per link. See details in the graphic below. Each spine has two links to the WAN IP Network configured on ports `Ethernet7` and `Ethernet8`. OSPF is added to these links as well.
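For reference, one of these point-to-point links is expressed in `core_interfaces` roughly as sketched below; the addresses and the remote node name are placeholders (assumptions), not the workshop's actual values.

``` yaml
core_interfaces:
  p2p_links:
    - ip: [ 10.0.0.0/31, 10.0.0.1/31 ]       # one /31 per link (placeholder addressing)
      nodes: [ s1-spine1, WANCORE ]          # the two link endpoints (placeholder names)
      interfaces: [ Ethernet7, Ethernet7 ]   # interface used on each endpoint
      include_in_underlay_protocol: true     # adds the link to the underlay IGP (OSPF here)
```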

@@ -369,7 +369,7 @@ show ip route

### **Test traffic between sites**

-From `s1-host1` ping both `s2-host1` & `s1-host2`
+From `s1-host1` ping both `s2-host1` & `s2-host2`

``` bash
# s2-host1
@@ -493,7 +493,7 @@ So far, so good! Before we publish our branch and create a Pull Request though,
### **Syslog Server**

Our next Day 2 change is to add a syslog server configuration to all of our switches. Once again, we'll take
-a look at the [AVD documentation site](https://avd.sh/en/stable/roles/eos_cli_config_gen/README_v4.0.html?h=logging#logging) to see the
+a look at the [AVD documentation site](https://avd.sh/en/v4.1.0/roles/eos_cli_config_gen/docs/input-variables.html#logging) to see the
data model associated with the `logging` input variable.

Just like with our banner, the syslog server configuration will be consistent on all of our switches. Because of this, we can also put this into
@@ -505,11 +505,11 @@ Add the code block below to `global_vars/global_dc_vars.yml`
# Syslog
logging:
  vrfs:
-    default:
+    - name: default
      source_interface: Management0
      hosts:
-        10.200.0.108:
-        10.200.1.108:
+        - name: 10.200.0.108
+        - name: 10.200.1.108
```

Finally, let's build out our configurations
@@ -644,13 +644,13 @@ will be used to connect to s1-leaf6.
Starting at line 64, add the following code block into `sites/site_1/group_vars/SITE1_FABRIC.yml`

``` yaml
-    RACK3:
+    - group: RACK3
      nodes:
-        s1-leaf5:
+        - name: s1-leaf5
          id: 7
          mgmt_ip: 192.168.0.28/24
          uplink_switch_interfaces: [ Ethernet9, Ethernet9 ]
-        s1-leaf6:
+        - name: s1-leaf6
          id: 8
          mgmt_ip: 192.168.0.29/24
          uplink_switch_interfaces: [ Ethernet10, Ethernet10 ]
@@ -682,12 +682,12 @@ The `sites/site_1/group_vars/SITE1_FABRIC.yml` file should now look like the exa
    virtual_router_mac_address: 00:1c:73:00:dc:01
    mlag_interfaces: [ Ethernet1, Ethernet6 ]
  node_groups:
-    SPINES:
+    - group: SPINES
      nodes:
-        s1-spine1:
+        - name: s1-spine1
          id: 1
          mgmt_ip: 192.168.0.10/24
-        s1-spine2:
+        - name: s1-spine2
          id: 2
          mgmt_ip: 192.168.0.11/24
@@ -702,37 +702,37 @@ The `sites/site_1/group_vars/SITE1_FABRIC.yml` file should now look like the exa
    uplink_interfaces: [ Ethernet2, Ethernet3 ]
    mlag_interfaces: [ Ethernet1, Ethernet6 ]
  node_groups:
-    RACK1:
+    - group: RACK1
      filter:
        tags: [ "Web" ]
      nodes:
-        s1-leaf1:
+        - name: s1-leaf1
          id: 3
          mgmt_ip: 192.168.0.12/24
          uplink_switch_interfaces: [ Ethernet2, Ethernet2 ]
-        s1-leaf2:
+        - name: s1-leaf2
          id: 4
          mgmt_ip: 192.168.0.13/24
          uplink_switch_interfaces: [ Ethernet3, Ethernet3 ]
-    RACK2:
+    - group: RACK2
      filter:
        tags: [ "App" ]
      nodes:
-        s1-leaf3:
+        - name: s1-leaf3
          id: 5
          mgmt_ip: 192.168.0.14/24
          uplink_switch_interfaces: [ Ethernet4, Ethernet4 ]
-        s1-leaf4:
+        - name: s1-leaf4
          id: 6
          mgmt_ip: 192.168.0.15/24
          uplink_switch_interfaces: [ Ethernet5, Ethernet5 ]
-    RACK3:
+    - group: RACK3
      nodes:
-        s1-leaf5:
+        - name: s1-leaf5
          id: 7
          mgmt_ip: 192.168.0.28/24
          uplink_switch_interfaces: [ Ethernet9, Ethernet9 ]
-        s1-leaf6:
+        - name: s1-leaf6
          id: 8
          mgmt_ip: 192.168.0.29/24
          uplink_switch_interfaces: [ Ethernet10, Ethernet10 ]