Skip to content

Instantly share code, notes, and snippets.

Embed
What would you like to do?
dhcpd.conf, for ztp-upgrade
===================================================
# Capture the client's vendor class so it can be matched/logged below.
set vendor-string = option vendor-class-identifier;
log-facility local7;

# Juniper ZTP vendor option space (encapsulated sub-options 0-4).
option space juniper;
option juniper.image-file-name code 0 = text;
option juniper.config-file-name code 1 = text;
option juniper.image-file-type code 2 = text;
option juniper.transfer-mode code 3 = text;
option juniper.alt-image-file-name code 4 = text;
#option juniper.http-port code 5 = text;
#option juniper-encapsulation code 43 = encapsulate juniper;
# TFTP/file server address (option 150) handed to the client.
option option-150 code 150 = {ip-address};

# for unicast dhcp (i.e. forwarded from a non-local client)
local-address 10.240.240.241;
default-lease-time 1800;
max-lease-time 1800;

# ztp-config via ztp-net locally
subnet 10.240.240.240 netmask 255.255.255.240 {
    option routers 10.240.240.241;  # FIX: statement was missing its semicolon
    option subnet-mask 255.255.255.240;
    option option-150 10.240.240.241;  # where to download from
    pool {
        # ~~ is dhcpd's case-insensitive regex match on the vendor class.
        if option vendor-class-identifier ~~ "^Juniper-ex2200-" {
            vendor-option-space juniper;
            option juniper.image-file-type "symlink";
            option juniper.transfer-mode "http";
            option juniper.config-file-name "/ztp/ex2200.conf";
            option juniper.image-file-name "/ztp/ex2200.tgz";
            log(info, "found EX2200 locally");
        }
        # FIX: was a second independent "if", so the "else" below also ran
        # for EX2200 clients; "elsif" makes the branches mutually exclusive.
        elsif option vendor-class-identifier ~~ "^Juniper-ex2300-" {
            vendor-option-space juniper;
            option juniper.image-file-type "symlink";
            option juniper.transfer-mode "http";
            option juniper.config-file-name "/ztp/ex2300.conf";
            option juniper.image-file-name "/ztp/ex2300.tgz";
            log(info, "found EX2300 locally");
        }
        else { log(info, substring(option vendor-class-identifier,0,22)); }
        range dynamic-bootp 10.240.240.242 10.240.240.250;
    }
}
# add more sections like the previous, for remote networks
=========================================================
Ansible playbook (YAML) for doing OS upgrades of EX2200 switches (may not always work all the way through).
This relies on having credentials/login stored in Ansible under the variable name 'ansiblefw',
and on an inventory from which you can look up the target hosts.
The playbook will install the os, wait for the device to reboot, and do a
snapshot and rescue config afterwards.
With the EX2200, the playbook does not always detect that the reboot command succeeded, in which
case it considers the device to have failed. The snapshot and rescue-config steps must then be
performed manually, or with a separate playbook.
Fire it like this:
ansible-playbook this.yaml -e victims=hostname1:hostname2:hostname3:and:so:on
=================this.yaml=================================
---
# Install a Junos OS package on the target switches, wait for the reboot to
# complete, then take a system snapshot and save a rescue configuration.
# FIX: the pasted playbook had lost all indentation (invalid YAML); the
# structure below is reconstructed from the visible key/value syntax.
# Invoke with: ansible-playbook this.yaml -e victims=host1:host2:...
- name: Install Junos OS
  hosts: "{{ victims }}"
  roles:
    - Juniper.junos
  connection: local
  gather_facts: false
  vars_files:
    - group_vars/all
  vars:
    # Max seconds to wait for the device to come back after reboot.
    wait_time: 3600
    OS_version: "12.3R12-S17"
    OS_package: "jinstall-ex-2200-12.3R12-S17-domestic-signed.tgz"
    pkg_dir: "/path/to/file/above/"
    ansible_command_timeout: 3600
    log_dir: "log/"
  tasks:
    - name: Checking NETCONF connectivity
      wait_for:
        host: "{{ inventory_hostname }}"
        port: 22
        timeout: 5

    - name: Install Junos OS package
      juniper_junos_software:
        reboot: true
        provider: "{{ ansiblefw }}"
        version: "{{ OS_version }}"
        package: "{{ pkg_dir }}/{{ OS_package }}"
        logfile: "{{ log_dir }}/software.log"
        install_timeout: 1800
        checksum_timeout: 900
        cleanfs_timeout: 900
        reboot_pause: 1
      register: sw
      notify:
        - wait_reboot

    # Run the wait_reboot handler now, before the post-reboot tasks.
    - meta: flush_handlers

    - name: Checking NETCONF connectivity after reboot
      wait_for:
        host: "{{ inventory_hostname }}"
        port: 22
        timeout: 15

    - name: Mirror system, autorecovery state save
      juniper_junos_command:
        provider: "{{ ansiblefw }}"
        commands:
          - "request system snapshot slice alternate"
          - "request system configuration rescue save"
      register: junos_result

    - name: Print mirror system response
      debug:
        var: junos_result

  handlers:
    # Wait for SSH/NETCONF to return after the device reboots; skipped in
    # check mode (the install task reports check_mode in its result).
    - name: wait_reboot
      wait_for:
        delay: 65
        host: "{{ inventory_hostname }}"
        port: 22
        timeout: "{{ wait_time }}"
      when: not sw.check_mode
==================================================
Sign up for free to join this conversation on GitHub. Already have an account? Sign in to comment