First batch of incidental integration tests. (#67717)

* Initial copy of incidental network tests.

* Update incidental test aliases.

* Add incidental tests to CI.

* Rewrite module references in tests (see the sketch after this list).

This should not be necessary once module redirection is supported.

* Rewrite target references for renamed targets.

* Add support collections for incidental tests.

* Add ignores for test support code.

* Remove echo used for debugging.
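
An illustrative sketch of the module-reference rewrite described above (an assumed example, not part of this change): short module names used as task keys in the copied tests are replaced with their fully qualified collection names. The FQCN_MAP contents and the rewrite_module_references helper below are names chosen for illustration only.

import re

# Hypothetical mapping from short module names to their FQCNs (subset only).
FQCN_MAP = {
    "ios_config": "cisco.ios.ios_config",
    "vyos_config": "vyos.vyos.vyos_config",
    "net_put": "ansible.netcommon.net_put",
}

def rewrite_module_references(task_yaml):
    """Rewrite short module names used as task keys to their FQCNs."""
    pattern = re.compile(
        r"^(\s*)(%s):" % "|".join(map(re.escape, FQCN_MAP)), re.MULTILINE
    )
    return pattern.sub(
        lambda m: "%s%s:" % (m.group(1), FQCN_MAP[m.group(2)]), task_yaml
    )

# "  ios_config:" becomes "  cisco.ios.ios_config:"; already-qualified keys are left untouched.
print(rewrite_module_references("- name: setup\n  ios_config:\n    lines:\n      - ip ssh version 2\n"))
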
Matt Clay committed via GitHub
parent 81ffc315a4
commit 4e571248a9

@ -68,6 +68,11 @@ matrix:
- env: T=vyos/1.1.8/2.7/1
- env: T=vyos/1.1.8/3.6/1
- env: T=i/ios/csr1000v//1
- env: T=i/vyos/1.1.8/2.7/1
- env: T=i/vyos/1.1.8/3.6/1
- env: T=aix/7.2/1
- env: T=osx/10.11/1
- env: T=rhel/7.6/1

@ -0,0 +1,2 @@
shippable/ios/incidental
network/ios

@ -0,0 +1,17 @@
---
- name: collect all cli test cases
find:
paths: "{{ role_path }}/tests/cli"
patterns: "{{ testcase }}.yaml"
register: test_cases
delegate_to: localhost
- name: set test_items
set_fact: test_items="{{ test_cases.files | map(attribute='path') | list }}"
- name: run test cases (connection=ansible.netcommon.network_cli)
include: "{{ test_case_to_run }}"
with_items: "{{ test_items }}"
loop_control:
loop_var: test_case_to_run
tags: connection_network_cli

@ -0,0 +1,2 @@
---
- { include: cli.yaml, tags: ['cli'] }

@ -0,0 +1,52 @@
---
- debug: msg="START ios cli/net_get.yaml on connection={{ ansible_connection }}"
# Add minimal testcase to check args are passed correctly to
# implementation module and module run is successful.
- name: setup
cisco.ios.ios_config:
lines:
- ip ssh version 2
- ip scp server enable
- username {{ ansible_ssh_user }} privilege 15
match: none
- name: setup (copy file to be fetched from device)
ansible.netcommon.net_put:
src: ios1.cfg
register: result
- name: setup (remove file from localhost if present)
file:
path: ios_{{ inventory_hostname }}.cfg
state: absent
delegate_to: localhost
- name: get the file from device with relative destination
ansible.netcommon.net_get:
src: ios1.cfg
dest: 'ios_{{ inventory_hostname }}.cfg'
register: result
- assert:
that:
- result.changed == true
- name: Idempotency check
ansible.netcommon.net_get:
src: ios1.cfg
dest: 'ios_{{ inventory_hostname }}.cfg'
register: result
- assert:
that:
- result.changed == false
- name: setup (remove file from localhost if present)
file:
path: ios_{{ inventory_hostname }}.cfg
state: absent
delegate_to: localhost
- debug: msg="END ios cli/net_get.yaml on connection={{ ansible_connection }}"

@ -0,0 +1,73 @@
---
- debug:
msg: "START ios cli/net_put.yaml on connection={{ ansible_connection }}"
# Add minimal testcase to check args are passed correctly to
# implementation module and module run is successful.
- name: setup
cisco.ios.ios_config:
lines:
- ip ssh version 2
- ip scp server enable
- username {{ ansible_ssh_user }} privilege 15
match: none
- name: Delete existing files if present on remote host
cisco.ios.ios_command:
commands: "{{ item }}"
loop:
- delete /force ios1.cfg
- delete /force ios.cfg
- delete /force nonascii.bin
ignore_errors: true
- name: copy file from controller to ios + scp (Default)
ansible.netcommon.net_put:
src: ios1.cfg
register: result
- assert:
that:
- result.changed == true
- name: Idempotency Check
ansible.netcommon.net_put:
src: ios1.cfg
register: result
- assert:
that:
- result.changed == false
- name: copy file from controller to ios + dest specified
ansible.netcommon.net_put:
src: ios1.cfg
dest: ios.cfg
register: result
- assert:
that:
- result.changed == true
- name: copy file with non-ascii characters to ios in template mode (fail case)
ansible.netcommon.net_put:
src: nonascii.bin
mode: 'text'
register: result
ignore_errors: true
- assert:
that:
- result.failed == true
- name: copy file with non-ascii characters to ios in default mode (binary)
ansible.netcommon.net_put:
src: nonascii.bin
register: result
- assert:
that:
- result.changed == true
- debug: msg="END ios cli/net_put.yaml on connection={{ ansible_connection }}"

@ -0,0 +1,2 @@
shippable/vyos/incidental
network/vyos

@ -0,0 +1,22 @@
---
- name: collect all cli test cases
find:
paths: "{{ role_path }}/tests/cli"
patterns: "{{ testcase }}.yaml"
register: test_cases
delegate_to: localhost
- name: set test_items
set_fact: test_items="{{ test_cases.files | map(attribute='path') | list }}"
- name: run test case (connection=ansible.netcommon.network_cli)
include: "{{ test_case_to_run }} ansible_connection=ansible.netcommon.network_cli"
with_items: "{{ test_items }}"
loop_control:
loop_var: test_case_to_run
- name: run test case (connection=local)
include: "{{ test_case_to_run }} ansible_connection=local"
with_first_found: "{{ test_items }}"
loop_control:
loop_var: test_case_to_run

@ -0,0 +1,16 @@
---
- name: collect all cli_config test cases
find:
paths: "{{ role_path }}/tests/cli_config"
patterns: "{{ testcase }}.yaml"
register: test_cases
delegate_to: localhost
- name: set test_items
set_fact: test_items="{{ test_cases.files | map(attribute='path') | list }}"
- name: run test case (connection=ansible.netcommon.network_cli)
include: "{{ test_case_to_run }} ansible_connection=ansible.netcommon.network_cli"
with_items: "{{ test_items }}"
loop_control:
loop_var: test_case_to_run

@ -0,0 +1,3 @@
---
- {include: cli.yaml, tags: ['cli']}
- {include: cli_config.yaml, tags: ['cli_config']}

@ -0,0 +1,113 @@
---
- debug: msg="START vyos/backup.yaml on connection={{ ansible_connection }}"
- name: collect any backup files
find:
paths: "{{ role_path }}/backup"
pattern: "{{ inventory_hostname_short }}_config*"
register: backup_files
connection: local
- name: delete backup files
file:
path: "{{ item.path }}"
state: absent
with_items: "{{backup_files.files|default([])}}"
- name: take configuration backup
vyos.vyos.vyos_config:
backup: true
register: result
- assert:
that:
- "result.changed == true"
- name: collect any backup files
find:
paths: "{{ role_path }}/backup"
pattern: "{{ inventory_hostname_short }}_config*"
register: backup_files
connection: local
- assert:
that:
- "backup_files.files is defined"
- name: delete configured backup file path
file:
path: "{{ item }}"
state: absent
with_items:
- "{{ role_path }}/backup_test_dir/"
- "{{ role_path }}/backup/backup.cfg"
- name: take configuration backup in custom filename and directory path
vyos.vyos.vyos_config:
backup: true
backup_options:
filename: backup.cfg
dir_path: "{{ role_path }}/backup_test_dir/{{ inventory_hostname_short }}"
become: true
register: result
- assert:
that:
- "result.changed == true"
- name: check if the backup file-1 exists
find:
paths: "{{ role_path }}/backup_test_dir/{{ inventory_hostname_short }}/backup.cfg"
register: backup_file
connection: local
- assert:
that:
- "backup_file.files is defined"
- name: take configuration backup in custom filename
vyos.vyos.vyos_config:
backup: true
backup_options:
filename: backup.cfg
become: true
register: result
- assert:
that:
- "result.changed == true"
- name: check if the backup file-2 exists
find:
paths: "{{ role_path }}/backup/backup.cfg"
register: backup_file
connection: local
- assert:
that:
- "backup_file.files is defined"
- name: take configuration backup in custom path and default filename
vyos.vyos.vyos_config:
backup: true
backup_options:
dir_path: "{{ role_path }}/backup_test_dir/{{ inventory_hostname_short }}"
become: true
register: result
- assert:
that:
- "result.changed == true"
- name: check if the backup file-3 exists
find:
paths: "{{ role_path }}/backup_test_dir/{{ inventory_hostname_short }}"
pattern: "{{ inventory_hostname_short }}_config*"
register: backup_file
connection: local
- assert:
that:
- "backup_file.files is defined"
- debug: msg="END vyos/backup.yaml on connection={{ ansible_connection }}"

@ -0,0 +1,63 @@
---
- debug: msg="START cli/config_check.yaml on connection={{ ansible_connection }}"
- name: setup - ensure interface is not present
vyos.vyos.vyos_config:
lines: delete interfaces loopback lo
- name: setup - create interface
vyos.vyos.vyos_config:
lines:
- interfaces
- interfaces loopback lo
- interfaces loopback lo description test
register: result
# note collapsing the duplicate lines doesn't work if
# lines:
# - interfaces loopback lo description test
# - interfaces loopback lo
# - interfaces
- name: Check that multiple duplicate lines collapse into a single command
assert:
that:
- "{{ result.commands|length }} == 1"
- name: Check that set is correctly prepended
assert:
that:
- "result.commands[0] == 'set interfaces loopback lo description test'"
- name: configure config_check config command
vyos.vyos.vyos_config:
lines: delete interfaces loopback lo
register: result
- assert:
that:
- "result.changed == true"
- name: check config_check config command is idempotent
vyos.vyos.vyos_config:
lines: delete interfaces loopback lo
register: result
- assert:
that:
- "result.changed == false"
- name: check multiple line config filter is working
vyos.vyos.vyos_config:
lines:
- set system login user esa level admin
- set system login user esa authentication encrypted-password '!abc!'
- set system login user vyos level admin
- set system login user vyos authentication encrypted-password 'abc'
register: result
- assert:
that:
- "{{ result.filtered|length }} == 2"
- debug: msg="END cli/config_check.yaml on connection={{ ansible_connection }}"

@ -0,0 +1,34 @@
---
- debug: msg="START cli/comment.yaml on connection={{ ansible_connection }}"
- name: setup
vyos.vyos.vyos_config:
lines: set system host-name {{ inventory_hostname_short }}
match: none
- name: configure using comment
vyos.vyos.vyos_config:
lines: set system host-name foo
comment: this is a test
register: result
- assert:
that:
- "result.changed == true"
- "'set system host-name foo' in result.commands"
- name: collect system commits
vyos.vyos.vyos_command:
commands: show system commit
register: result
- assert:
that:
- "'this is a test' in result.stdout_lines[0][1]"
- name: teardown
vyos.vyos.vyos_config:
lines: set system host-name {{ inventory_hostname_short }}
match: none
- debug: msg="END cli/comment.yaml on connection={{ ansible_connection }}"

@ -0,0 +1,54 @@
---
- debug: msg="START cli/save.yaml on connection={{ ansible_connection }}"
- name: setup
vyos.vyos.vyos_config:
lines: set system host-name {{ inventory_hostname_short }}
match: none
- name: configure hostname and save
vyos.vyos.vyos_config:
lines: set system host-name foo
save: true
register: result
- assert:
that:
- "result.changed == true"
- "'set system host-name foo' in result.commands"
- name: configure hostname and don't save
vyos.vyos.vyos_config:
lines: set system host-name bar
register: result
- assert:
that:
- "result.changed == true"
- "'set system host-name bar' in result.commands"
- name: save config
vyos.vyos.vyos_config:
save: true
register: result
- assert:
that:
- "result.changed == true"
- name: save config again
vyos.vyos.vyos_config:
save: true
register: result
- assert:
that:
- "result.changed == false"
- name: teardown
vyos.vyos.vyos_config:
lines: set system host-name {{ inventory_hostname_short }}
match: none
save: true
- debug: msg="END cli/simple.yaml on connection={{ ansible_connection }}"

@ -0,0 +1,53 @@
---
- debug: msg="START cli/simple.yaml on connection={{ ansible_connection }}"
- name: setup
vyos.vyos.vyos_config:
lines: set system host-name {{ inventory_hostname_short }}
match: none
- name: configure simple config command
vyos.vyos.vyos_config:
lines: set system host-name foo
register: result
- assert:
that:
- "result.changed == true"
- "'set system host-name foo' in result.commands"
- name: check simple config command is idempotent
vyos.vyos.vyos_config:
lines: set system host-name foo
register: result
- assert:
that:
- "result.changed == false"
- name: Delete services
vyos.vyos.vyos_config: &del
lines:
- delete service lldp
- delete protocols static
- name: Configuring when commands start with whitespace
vyos.vyos.vyos_config:
src: "{{ role_path }}/tests/cli/config.cfg"
register: result
- assert:
that:
- "result.changed == true"
- '"set service lldp" in result.commands'
- '"set protocols static" in result.commands'
- name: Delete services
vyos.vyos.vyos_config: *del
- name: teardown
vyos.vyos.vyos_config:
lines: set system host-name {{ inventory_hostname_short }}
match: none
- debug: msg="END cli/simple.yaml on connection={{ ansible_connection }}"

@ -0,0 +1,114 @@
---
- debug: msg="END cli_config/backup.yaml on connection={{ ansible_connection }}"
- name: delete configured backup file path
file:
path: "{{ item }}"
state: absent
with_items:
- "{{ role_path }}/backup_test_dir/"
- "{{ role_path }}/backup/backup.cfg"
- name: collect any backup files
find:
paths: "{{ role_path }}/backup"
pattern: "{{ inventory_hostname_short }}_config*"
register: backup_files
connection: local
- name: delete backup files
file:
path: "{{ item.path }}"
state: absent
with_items: "{{backup_files.files|default([])}}"
- name: take config backup
ansible.netcommon.cli_config:
backup: true
become: true
register: result
- assert:
that:
- "result.changed == true"
- name: collect any backup files
find:
paths: "{{ role_path }}/backup"
pattern: "{{ inventory_hostname_short }}_config*"
register: backup_files
connection: local
- assert:
that:
- "backup_files.files is defined"
- name: take configuration backup in custom filename and directory path
ansible.netcommon.cli_config:
backup: true
backup_options:
filename: backup.cfg
dir_path: "{{ role_path }}/backup_test_dir/{{ inventory_hostname_short }}"
become: true
register: result
- assert:
that:
- "result.changed == true"
- name: check if the backup file-1 exists
find:
paths: "{{ role_path }}/backup_test_dir/{{ inventory_hostname_short }}/backup.cfg"
register: backup_file
connection: local
- assert:
that:
- "backup_file.files is defined"
- name: take configuration backup in custom filename
ansible.netcommon.cli_config:
backup: true
backup_options:
filename: backup.cfg
become: true
register: result
- assert:
that:
- "result.changed == true"
- name: check if the backup file-2 exists
find:
paths: "{{ role_path }}/backup/backup.cfg"
register: backup_file
connection: local
- assert:
that:
- "backup_file.files is defined"
- name: take configuration backup in custom path and default filename
ansible.netcommon.cli_config:
backup: true
backup_options:
dir_path: "{{ role_path }}/backup_test_dir/{{ inventory_hostname_short }}"
become: true
register: result
- assert:
that:
- "result.changed == true"
- name: check if the backup file-3 exists
find:
paths: "{{ role_path }}/backup_test_dir/{{ inventory_hostname_short }}"
pattern: "{{ inventory_hostname_short }}_config*"
register: backup_file
connection: local
- assert:
that:
- "backup_file.files is defined"
- debug: msg="END cli_config/backup.yaml on connection={{ ansible_connection }}"

@ -0,0 +1,28 @@
---
- debug: msg="START cli_config/cli_basic.yaml on connection={{ ansible_connection }}"
- name: setup - remove interface description
ansible.netcommon.cli_config: &rm
config: delete interfaces loopback lo description
- name: configure device with config
ansible.netcommon.cli_config: &conf
config: set interfaces loopback lo description 'this is a test'
register: result
- assert:
that:
- "result.changed == true"
- name: Idempotence
ansible.netcommon.cli_config: *conf
register: result
- assert:
that:
- "result.changed == false"
- name: teardown
ansible.netcommon.cli_config: *rm
- debug: msg="END cli_config/cli_basic.yaml on connection={{ ansible_connection }}"

@ -0,0 +1,30 @@
---
- debug: msg="START cli_config/cli_comment.yaml on connection={{ ansible_connection }}"
- name: setup
ansible.netcommon.cli_config: &rm
config: set system host-name {{ inventory_hostname_short }}
- name: configure using comment
ansible.netcommon.cli_config:
config: set system host-name foo
commit_comment: this is a test
register: result
- assert:
that:
- "result.changed == true"
- name: collect system commits
vyos.vyos.vyos_command:
commands: show system commit
register: result
- assert:
that:
- "'this is a test' in result.stdout_lines[0][1]"
- name: teardown
ansible.netcommon.cli_config: *rm
- debug: msg="END cli_config/cli_comment.yaml on connection={{ ansible_connection }}"

@ -0,0 +1,2 @@
shippable/vyos/incidental
network/vyos

@ -0,0 +1,3 @@
---
dependencies:
- incidental_vyos_prepare_tests

@ -0,0 +1,19 @@
---
- name: Collect all cli test cases
find:
paths: "{{ role_path }}/tests/cli"
patterns: "{{ testcase }}.yaml"
use_regex: true
register: test_cases
delegate_to: localhost
- name: Set test_items
set_fact: test_items="{{ test_cases.files | map(attribute='path') | list }}"
- name: Run test case (connection=ansible.netcommon.network_cli)
include: "{{ test_case_to_run }}"
vars:
ansible_connection: ansible.netcommon.network_cli
with_items: "{{ test_items }}"
loop_control:
loop_var: test_case_to_run

@ -0,0 +1,14 @@
---
- name: Setup
ansible.netcommon.cli_config:
config: "{{ lines }}"
vars:
lines: |
set service lldp interface eth1
set service lldp interface eth1 location civic-based country-code US
set service lldp interface eth1 location civic-based ca-type 0 ca-value ENGLISH
set service lldp interface eth2
set service lldp interface eth2 location coordinate-based latitude 33.524449N
set service lldp interface eth2 location coordinate-based altitude 2200
set service lldp interface eth2 location coordinate-based datum WGS84
set service lldp interface eth2 location coordinate-based longitude 222.267255W

@ -0,0 +1,10 @@
---
- name: Setup
ansible.netcommon.cli_config:
config: "{{ lines }}"
vars:
lines: |
set service lldp interface eth2
set service lldp interface eth2 location civic-based country-code US
set service lldp interface eth2 location civic-based ca-type 0 ca-value ENGLISH
set service lldp interface eth2 disable

@ -0,0 +1,8 @@
---
- name: Remove Config
ansible.netcommon.cli_config:
config: "{{ lines }}"
vars:
lines: |
delete service lldp interface
delete service lldp

@ -0,0 +1,46 @@
---
- debug:
msg: "Start vyos_lldp_interfaces deleted integration tests ansible_connection={{ ansible_connection }}"
- include_tasks: _populate.yaml
- block:
- name: Delete attributes of given LLDP interfaces.
vyos.vyos.vyos_lldp_interfaces: &deleted
config:
- name: 'eth1'
- name: 'eth2'
state: deleted
register: result
- name: Assert that the before dicts were correctly generated
assert:
that:
- "{{ populate | symmetric_difference(result['before']) |length == 0 }}"
- name: Assert that the correct set of commands were generated
assert:
that:
- "{{ deleted['commands'] | symmetric_difference(result['commands']) |length == 0 }}"
- name: Assert that the after dicts were correctly generated
assert:
that:
- "{{ deleted['after'] | symmetric_difference(result['after']) |length == 0 }}"
- name: Delete attributes of given interfaces (IDEMPOTENT)
vyos.vyos.vyos_lldp_interfaces: *deleted
register: result
- name: Assert that the previous task was idempotent
assert:
that:
- "result.changed == false"
- "result.commands|length == 0"
- name: Assert that the before dicts were correctly generated
assert:
that:
- "{{ deleted['after'] | symmetric_difference(result['before']) |length == 0 }}"
always:
- include_tasks: _remove_config.yaml

@ -0,0 +1,36 @@
---
- debug:
msg: "START vyos_lldp_interfaces empty_config integration tests on connection={{ ansible_connection }}"
- name: Merged with empty config should give appropriate error message
vyos.vyos.vyos_lldp_interfaces:
config:
state: merged
register: result
ignore_errors: true
- assert:
that:
- result.msg == 'value of config parameter must not be empty for state merged'
- name: Replaced with empty config should give appropriate error message
vyos.vyos.vyos_lldp_interfaces:
config:
state: replaced
register: result
ignore_errors: true
- assert:
that:
- result.msg == 'value of config parameter must not be empty for state replaced'
- name: Overridden with empty config should give appropriate error message
vyos.vyos.vyos_lldp_interfaces:
config:
state: overridden
register: result
ignore_errors: true
- assert:
that:
- result.msg == 'value of config parameter must not be empty for state overridden'

@ -0,0 +1,58 @@
---
- debug:
msg: "START vyos_lldp_interfaces merged integration tests on connection={{ ansible_connection }}"
- include_tasks: _remove_config.yaml
- block:
  - name: Merge the provided configuration with the existing running configuration
vyos.vyos.vyos_lldp_interfaces: &merged
config:
- name: 'eth1'
location:
civic_based:
country_code: 'US'
ca_info:
- ca_type: 0
ca_value: 'ENGLISH'
- name: 'eth2'
location:
coordinate_based:
altitude: 2200
datum: 'WGS84'
longitude: '222.267255W'
latitude: '33.524449N'
state: merged
register: result
- name: Assert that before dicts were correctly generated
assert:
that: "{{ merged['before'] | symmetric_difference(result['before']) |length == 0 }}"
- name: Assert that correct set of commands were generated
assert:
that:
- "{{ merged['commands'] | symmetric_difference(result['commands']) |length == 0 }}"
  - name: Assert that after dicts were correctly generated
assert:
that:
- "{{ merged['after'] | symmetric_difference(result['after']) |length == 0 }}"
- name: Merge the provided configuration with the existing running configuration (IDEMPOTENT)
vyos.vyos.vyos_lldp_interfaces: *merged
register: result
- name: Assert that the previous task was idempotent
assert:
that:
- "result['changed'] == false"
- name: Assert that before dicts were correctly generated
assert:
that:
- "{{ merged['after'] | symmetric_difference(result['before']) |length == 0 }}"
always:
- include_tasks: _remove_config.yaml

@ -0,0 +1,49 @@
---
- debug:
msg: "START vyos_lldp_interfaces overridden integration tests on connection={{ ansible_connection }}"
- include_tasks: _remove_config.yaml
- include_tasks: _populate_intf.yaml
- block:
- name: Overrides all device configuration with provided configuration
vyos.vyos.vyos_lldp_interfaces: &overridden
config:
- name: 'eth2'
location:
elin: '0000000911'
state: overridden
register: result
- name: Assert that before dicts were correctly generated
assert:
that:
- "{{ populate_intf | symmetric_difference(result['before']) |length == 0 }}"
- name: Assert that correct commands were generated
assert:
that:
- "{{ overridden['commands'] | symmetric_difference(result['commands']) |length == 0 }}"
- name: Assert that after dicts were correctly generated
assert:
that:
- "{{ overridden['after'] | symmetric_difference(result['after']) |length == 0 }}"
- name: Overrides all device configuration with provided configurations (IDEMPOTENT)
vyos.vyos.vyos_lldp_interfaces: *overridden
register: result
- name: Assert that the previous task was idempotent
assert:
that:
- "result['changed'] == false"
- name: Assert that before dicts were correctly generated
assert:
that:
- "{{ overridden['after'] | symmetric_difference(result['before']) |length == 0 }}"
always:
- include_tasks: _remove_config.yaml

@ -0,0 +1,63 @@
---
- debug:
msg: "START vyos_lldp_interfaces replaced integration tests on connection={{ ansible_connection }}"
- include_tasks: _remove_config.yaml
- include_tasks: _populate.yaml
- block:
- name: Replace device configurations of listed LLDP interfaces with provided configurations
vyos.vyos.vyos_lldp_interfaces: &replaced
config:
- name: 'eth2'
enable: false
location:
civic_based:
country_code: 'US'
ca_info:
- ca_type: 0
ca_value: 'ENGLISH'
- name: 'eth1'
enable: false
location:
coordinate_based:
altitude: 2200
datum: 'WGS84'
longitude: '222.267255W'
latitude: '33.524449N'
state: replaced
register: result
- name: Assert that correct set of commands were generated
assert:
that:
- "{{ replaced['commands'] | symmetric_difference(result['commands']) |length == 0 }}"
- name: Assert that before dicts are correctly generated
assert:
that:
- "{{ populate | symmetric_difference(result['before']) |length == 0 }}"
- name: Assert that after dict is correctly generated
assert:
that:
- "{{ replaced['after'] | symmetric_difference(result['after']) |length == 0 }}"
  - name: Replace device configurations of listed LLDP interfaces with provided configurations (IDEMPOTENT)
vyos.vyos.vyos_lldp_interfaces: *replaced
register: result
- name: Assert that task was idempotent
assert:
that:
- "result['changed'] == false"
- name: Assert that before dict is correctly generated
assert:
that:
- "{{ replaced['after'] | symmetric_difference(result['before']) |length == 0 }}"
always:
- include_tasks: _remove_config.yaml

@ -0,0 +1,57 @@
---
- debug:
msg: "START vyos_lldp_interfaces round trip integration tests on connection={{ ansible_connection }}"
- include_tasks: _remove_config.yaml
- block:
- name: Apply the provided configuration (base config)
vyos.vyos.vyos_lldp_interfaces:
config:
- name: 'eth1'
location:
civic_based:
country_code: 'US'
ca_info:
- ca_type: 0
ca_value: 'ENGLISH'
state: merged
register: base_config
- name: Gather lldp_interfaces facts
vyos.vyos.vyos_facts:
gather_subset:
- default
gather_network_resources:
- lldp_interfaces
- name: Apply the provided configuration (config to be reverted)
vyos.vyos.vyos_lldp_interfaces:
config:
- name: 'eth2'
location:
coordinate_based:
altitude: 2200
datum: 'WGS84'
longitude: '222.267255W'
latitude: '33.524449N'
state: merged
register: result
- name: Assert that changes were applied
assert:
that: "{{ round_trip['after'] | symmetric_difference(result['after']) |length == 0 }}"
- name: Revert back to base config using facts round trip
vyos.vyos.vyos_lldp_interfaces:
config: "{{ ansible_facts['network_resources']['lldp_interfaces'] }}"
state: overridden
register: revert
- name: Assert that config was reverted
assert:
that: "{{ base_config['after'] | symmetric_difference(revert['after']) |length == 0 }}"
always:
- include_tasks: _remove_config.yaml

@ -0,0 +1,130 @@
---
merged:
before: []
commands:
- "set service lldp interface eth1 location civic-based country-code 'US'"
- "set service lldp interface eth1 location civic-based ca-type 0 ca-value 'ENGLISH'"
- "set service lldp interface eth1"
- "set service lldp interface eth2 location coordinate-based latitude '33.524449N'"
- "set service lldp interface eth2 location coordinate-based altitude '2200'"
- "set service lldp interface eth2 location coordinate-based datum 'WGS84'"
- "set service lldp interface eth2 location coordinate-based longitude '222.267255W'"
- "set service lldp interface eth2 location coordinate-based latitude '33.524449N'"
- "set service lldp interface eth2 location coordinate-based altitude '2200'"
- "set service lldp interface eth2 location coordinate-based datum 'WGS84'"
- "set service lldp interface eth2 location coordinate-based longitude '222.267255W'"
- "set service lldp interface eth2"
after:
- name: 'eth1'
location:
civic_based:
country_code: 'US'
ca_info:
- ca_type: 0
ca_value: 'ENGLISH'
- name: 'eth2'
location:
coordinate_based:
altitude: 2200
datum: 'WGS84'
longitude: '222.267255W'
latitude: '33.524449N'
populate:
- name: 'eth1'
location:
civic_based:
country_code: 'US'
ca_info:
- ca_type: 0
ca_value: 'ENGLISH'
- name: 'eth2'
location:
coordinate_based:
altitude: 2200
datum: 'WGS84'
longitude: '222.267255W'
latitude: '33.524449N'
replaced:
commands:
- "delete service lldp interface eth2 location"
- "set service lldp interface eth2 'disable'"
- "set service lldp interface eth2 location civic-based country-code 'US'"
- "set service lldp interface eth2 location civic-based ca-type 0 ca-value 'ENGLISH'"
- "delete service lldp interface eth1 location"
- "set service lldp interface eth1 'disable'"
- "set service lldp interface eth1 location coordinate-based latitude '33.524449N'"
- "set service lldp interface eth1 location coordinate-based altitude '2200'"
- "set service lldp interface eth1 location coordinate-based datum 'WGS84'"
- "set service lldp interface eth1 location coordinate-based longitude '222.267255W'"
after:
- name: 'eth2'
enable: false
location:
civic_based:
country_code: 'US'
ca_info:
- ca_type: 0
ca_value: 'ENGLISH'
- name: 'eth1'
enable: false
location:
coordinate_based:
altitude: 2200
datum: 'WGS84'
longitude: '222.267255W'
latitude: '33.524449N'
populate_intf:
- name: 'eth2'
enable: false
location:
civic_based:
country_code: 'US'
ca_info:
- ca_type: 0
ca_value: 'ENGLISH'
overridden:
commands:
- "delete service lldp interface eth2 location"
- "delete service lldp interface eth2 'disable'"
- "set service lldp interface eth2 location elin '0000000911'"
after:
- name: 'eth2'
location:
elin: 0000000911
deleted:
commands:
- "delete service lldp interface eth1"
- "delete service lldp interface eth2"
after: []
round_trip:
after:
- name: 'eth1'
location:
civic_based:
country_code: 'US'
ca_info:
- ca_type: 0
ca_value: 'ENGLISH'
- name: 'eth2'
location:
coordinate_based:
altitude: 2200
datum: 'WGS84'
longitude: '222.267255W'
latitude: '33.524449N'

@ -0,0 +1,2 @@
shippable/vyos/incidental
network/vyos

@ -0,0 +1,22 @@
---
- name: collect all cli test cases
find:
paths: "{{ role_path }}/tests/cli"
patterns: "{{ testcase }}.yaml"
register: test_cases
delegate_to: localhost
- name: set test_items
set_fact: test_items="{{ test_cases.files | map(attribute='path') | list }}"
- name: run test case (connection=ansible.netcommon.network_cli)
include: "{{ test_case_to_run }} ansible_connection=ansible.netcommon.network_cli"
with_items: "{{ test_items }}"
loop_control:
loop_var: test_case_to_run
- name: run test case (connection=local)
include: "{{ test_case_to_run }} ansible_connection=local"
with_first_found: "{{ test_items }}"
loop_control:
loop_var: test_case_to_run

@ -0,0 +1,2 @@
---
- {include: cli.yaml, tags: ['cli']}

@ -0,0 +1,126 @@
---
- debug: msg="START cli/basic.yaml on connection={{ ansible_connection }}"
- name: set-up logging
vyos.vyos.vyos_logging:
dest: console
facility: all
level: info
state: present
register: result
- assert:
that:
- 'result.changed == true'
- '"set system syslog console facility all level info" in result.commands'
- name: set-up logging again (idempotent)
vyos.vyos.vyos_logging:
dest: console
facility: all
level: info
state: present
register: result
- assert:
that:
- 'result.changed == false'
- name: file logging
vyos.vyos.vyos_logging:
dest: file
name: test
facility: all
level: notice
state: present
register: result
- assert:
that:
- 'result.changed == true'
- '"set system syslog file test facility all level notice" in result.commands'
- name: file logging again (idempotent)
vyos.vyos.vyos_logging:
dest: file
name: test
facility: all
level: notice
state: present
register: result
- assert:
that:
- 'result.changed == false'
- name: delete logging
vyos.vyos.vyos_logging:
dest: file
name: test
facility: all
level: notice
state: absent
register: result
- assert:
that:
- 'result.changed == true'
- '"delete system syslog file test facility all level notice" in result.commands'
- name: delete logging again (idempotent)
vyos.vyos.vyos_logging:
dest: file
name: test
facility: all
level: notice
state: absent
register: result
- assert:
that:
- 'result.changed == false'
- name: Add logging collections
vyos.vyos.vyos_logging:
aggregate:
- {dest: file, name: test1, facility: all, level: info}
- {dest: file, name: test2, facility: news, level: debug}
state: present
register: result
- assert:
that:
- 'result.changed == true'
- '"set system syslog file test1 facility all level info" in result.commands'
- '"set system syslog file test2 facility news level debug" in result.commands'
- name: Add and remove logging collections with overrides
vyos.vyos.vyos_logging:
aggregate:
- {dest: console, facility: all, level: info}
- {dest: file, name: test1, facility: all, level: info, state: absent}
- {dest: console, facility: daemon, level: warning}
state: present
register: result
- assert:
that:
- 'result.changed == true'
- '"delete system syslog file test1 facility all level info" in result.commands'
- '"set system syslog console facility daemon level warning" in result.commands'
- name: Remove logging collections
vyos.vyos.vyos_logging:
aggregate:
- {dest: console, facility: all, level: info}
- {dest: console, facility: daemon, level: warning}
- {dest: file, name: test2, facility: news, level: debug}
state: absent
register: result
- assert:
that:
- 'result.changed == true'
- '"delete system syslog console facility all level info" in result.commands'
- '"delete system syslog console facility daemon level warning" in result.commands'
- '"delete system syslog file test2 facility news level debug" in result.commands'

@ -0,0 +1,39 @@
---
- debug: msg="START vyos cli/net_logging.yaml on connection={{ ansible_connection }}"
# Add minimal testcase to check args are passed correctly to
# implementation module and module run is successful.
- name: delete logging - setup
ansible.netcommon.net_logging:
dest: file
name: test
facility: all
level: notice
state: absent
register: result
- name: file logging using platform agnostic module
ansible.netcommon.net_logging:
dest: file
name: test
facility: all
level: notice
state: present
register: result
- assert:
that:
- 'result.changed == true'
- '"set system syslog file test facility all level notice" in result.commands'
- name: delete logging - teardown
ansible.netcommon.net_logging:
dest: file
name: test
facility: all
level: notice
state: absent
register: result
- debug: msg="END vyos cli/net_logging.yaml on connection={{ ansible_connection }}"

@ -0,0 +1,13 @@
---
- name: Ensure required interfaces are present in running-config
ansible.netcommon.cli_config:
config: "{{ lines }}"
vars:
lines: |
set interfaces ethernet eth0 address dhcp
set interfaces ethernet eth0 speed auto
set interfaces ethernet eth0 duplex auto
set interfaces ethernet eth1
set interfaces ethernet eth2
delete interfaces loopback lo
ignore_errors: true

@ -0,0 +1,2 @@
shippable/vyos/incidental
network/vyos

@ -0,0 +1,22 @@
---
- name: collect all cli test cases
find:
paths: "{{ role_path }}/tests/cli"
patterns: "{{ testcase }}.yaml"
register: test_cases
delegate_to: localhost
- name: set test_items
set_fact: test_items="{{ test_cases.files | map(attribute='path') | list }}"
- name: run test case (connection=ansible.netcommon.network_cli)
include: "{{ test_case_to_run }} ansible_connection=ansible.netcommon.network_cli"
with_items: "{{ test_items }}"
loop_control:
loop_var: test_case_to_run
- name: run test case (connection=local)
include: "{{ test_case_to_run }} ansible_connection=local"
with_first_found: "{{ test_items }}"
loop_control:
loop_var: test_case_to_run

@ -0,0 +1,2 @@
---
- {include: cli.yaml, tags: ['cli']}

@ -0,0 +1,120 @@
---
- debug: msg="START cli/basic.yaml on connection={{ ansible_connection }}"
- name: create static route
vyos.vyos.vyos_static_route:
prefix: 172.24.0.0/24
next_hop: 192.168.42.64
state: present
register: result
- assert:
that:
- 'result.changed == true'
- '"set protocols static route 172.24.0.0/24 next-hop 192.168.42.64" in result.commands'
- name: create static route again (idempotent)
vyos.vyos.vyos_static_route:
prefix: 172.24.0.0
mask: 24
next_hop: 192.168.42.64
state: present
register: result
- assert:
that:
- 'result.changed == false'
- name: modify admin distance of static route
vyos.vyos.vyos_static_route:
prefix: 172.24.0.0/24
next_hop: 192.168.42.64
admin_distance: 1
state: present
register: result
- assert:
that:
- 'result.changed == true'
- '"set protocols static route 172.24.0.0/24 next-hop 192.168.42.64 distance 1" in result.commands'
- name: modify admin distance of static route again (idempotent)
vyos.vyos.vyos_static_route:
prefix: 172.24.0.0
mask: 24
next_hop: 192.168.42.64
admin_distance: 1
state: present
register: result
- assert:
that:
- 'result.changed == false'
- name: delete static route
vyos.vyos.vyos_static_route:
prefix: 172.24.0.0/24
next_hop: 192.168.42.64
admin_distance: 1
state: absent
register: result
- assert:
that:
- 'result.changed == true'
- '"delete protocols static route 172.24.0.0/24" in result.commands'
- name: delete static route again (idempotent)
vyos.vyos.vyos_static_route:
prefix: 172.24.0.0/24
next_hop: 192.168.42.64
admin_distance: 1
state: absent
register: result
- assert:
that:
- 'result.changed == false'
- name: Add static route collections
vyos.vyos.vyos_static_route:
aggregate:
- {prefix: 172.24.1.0/24, next_hop: 192.168.42.64}
- {prefix: 172.24.2.0, mask: 24, next_hop: 192.168.42.64}
state: present
register: result
- assert:
that:
- 'result.changed == true'
- '"set protocols static route 172.24.1.0/24 next-hop 192.168.42.64" in result.commands'
- '"set protocols static route 172.24.2.0/24 next-hop 192.168.42.64" in result.commands'
- name: Add and remove static route collections with overrides
vyos.vyos.vyos_static_route:
aggregate:
- {prefix: 172.24.1.0/24, next_hop: 192.168.42.64}
- {prefix: 172.24.2.0/24, next_hop: 192.168.42.64, state: absent}
- {prefix: 172.24.3.0/24, next_hop: 192.168.42.64}
state: present
register: result
- assert:
that:
- 'result.changed == true'
- '"delete protocols static route 172.24.2.0/24" in result.commands'
- '"set protocols static route 172.24.3.0/24 next-hop 192.168.42.64" in result.commands'
- name: Remove static route collections
vyos.vyos.vyos_static_route:
aggregate:
- {prefix: 172.24.1.0/24, next_hop: 192.168.42.64}
- {prefix: 172.24.3.0/24, next_hop: 192.168.42.64}
state: absent
register: result
- assert:
that:
- 'result.changed == true'
- '"delete protocols static route 172.24.1.0/24" in result.commands'
- '"delete protocols static route 172.24.3.0/24" in result.commands'

@ -0,0 +1,33 @@
---
- debug: msg="START vyos cli/net_static_route.yaml on connection={{ ansible_connection }}"
# Add minimal testcase to check args are passed correctly to
# implementation module and module run is successful.
- name: delete static route - setup
ansible.netcommon.net_static_route:
prefix: 172.24.0.0/24
next_hop: 192.168.42.64
state: absent
register: result
- name: create static route using platform agnostic module
ansible.netcommon.net_static_route:
prefix: 172.24.0.0/24
next_hop: 192.168.42.64
state: present
register: result
- assert:
that:
- 'result.changed == true'
- '"set protocols static route 172.24.0.0/24 next-hop 192.168.42.64" in result.commands'
- name: delete static route - teardown
ansible.netcommon.net_static_route:
prefix: 172.24.0.0/24
next_hop: 192.168.42.64
state: absent
register: result
- debug: msg="END vyos cli/net_static_route.yaml on connection={{ ansible_connection }}"

@ -8241,6 +8241,57 @@ test/lib/ansible_test/_data/requirements/integration.cloud.azure.txt test-constr
test/lib/ansible_test/_data/sanity/pylint/plugins/string_format.py use-compat-six
test/lib/ansible_test/_data/setup/ConfigureRemotingForAnsible.ps1 pslint:PSCustomUseLiteralPath
test/lib/ansible_test/_data/setup/windows-httptester.ps1 pslint:PSCustomUseLiteralPath
test/support/network-integration/collections/ansible_collections/ansible/netcommon/plugins/doc_fragments/netconf.py future-import-boilerplate
test/support/network-integration/collections/ansible_collections/ansible/netcommon/plugins/doc_fragments/netconf.py metaclass-boilerplate
test/support/network-integration/collections/ansible_collections/ansible/netcommon/plugins/doc_fragments/network_agnostic.py future-import-boilerplate
test/support/network-integration/collections/ansible_collections/ansible/netcommon/plugins/doc_fragments/network_agnostic.py metaclass-boilerplate
test/support/network-integration/collections/ansible_collections/ansible/netcommon/plugins/module_utils/compat/ipaddress.py future-import-boilerplate
test/support/network-integration/collections/ansible_collections/ansible/netcommon/plugins/module_utils/compat/ipaddress.py metaclass-boilerplate
test/support/network-integration/collections/ansible_collections/ansible/netcommon/plugins/module_utils/compat/ipaddress.py no-unicode-literals
test/support/network-integration/collections/ansible_collections/ansible/netcommon/plugins/module_utils/compat/ipaddress.py pep8:E203
test/support/network-integration/collections/ansible_collections/ansible/netcommon/plugins/module_utils/network/common/cfg/base.py future-import-boilerplate
test/support/network-integration/collections/ansible_collections/ansible/netcommon/plugins/module_utils/network/common/cfg/base.py metaclass-boilerplate
test/support/network-integration/collections/ansible_collections/ansible/netcommon/plugins/module_utils/network/common/config.py future-import-boilerplate
test/support/network-integration/collections/ansible_collections/ansible/netcommon/plugins/module_utils/network/common/config.py metaclass-boilerplate
test/support/network-integration/collections/ansible_collections/ansible/netcommon/plugins/module_utils/network/common/facts/facts.py future-import-boilerplate
test/support/network-integration/collections/ansible_collections/ansible/netcommon/plugins/module_utils/network/common/facts/facts.py metaclass-boilerplate
test/support/network-integration/collections/ansible_collections/ansible/netcommon/plugins/module_utils/network/common/netconf.py future-import-boilerplate
test/support/network-integration/collections/ansible_collections/ansible/netcommon/plugins/module_utils/network/common/netconf.py metaclass-boilerplate
test/support/network-integration/collections/ansible_collections/ansible/netcommon/plugins/module_utils/network/common/network.py future-import-boilerplate
test/support/network-integration/collections/ansible_collections/ansible/netcommon/plugins/module_utils/network/common/network.py metaclass-boilerplate
test/support/network-integration/collections/ansible_collections/ansible/netcommon/plugins/module_utils/network/common/parsing.py future-import-boilerplate
test/support/network-integration/collections/ansible_collections/ansible/netcommon/plugins/module_utils/network/common/parsing.py metaclass-boilerplate
test/support/network-integration/collections/ansible_collections/ansible/netcommon/plugins/module_utils/network/common/utils.py future-import-boilerplate
test/support/network-integration/collections/ansible_collections/ansible/netcommon/plugins/module_utils/network/common/utils.py metaclass-boilerplate
test/support/network-integration/collections/ansible_collections/ansible/netcommon/plugins/module_utils/network/netconf/netconf.py future-import-boilerplate
test/support/network-integration/collections/ansible_collections/ansible/netcommon/plugins/module_utils/network/netconf/netconf.py metaclass-boilerplate
test/support/network-integration/collections/ansible_collections/ansible/netcommon/plugins/module_utils/network/restconf/restconf.py future-import-boilerplate
test/support/network-integration/collections/ansible_collections/ansible/netcommon/plugins/module_utils/network/restconf/restconf.py metaclass-boilerplate
test/support/network-integration/collections/ansible_collections/cisco/ios/plugins/doc_fragments/ios.py future-import-boilerplate
test/support/network-integration/collections/ansible_collections/cisco/ios/plugins/doc_fragments/ios.py metaclass-boilerplate
test/support/network-integration/collections/ansible_collections/cisco/ios/plugins/module_utils/network/ios/ios.py future-import-boilerplate
test/support/network-integration/collections/ansible_collections/cisco/ios/plugins/module_utils/network/ios/ios.py metaclass-boilerplate
test/support/network-integration/collections/ansible_collections/cisco/ios/plugins/modules/ios_command.py future-import-boilerplate
test/support/network-integration/collections/ansible_collections/cisco/ios/plugins/modules/ios_command.py metaclass-boilerplate
test/support/network-integration/collections/ansible_collections/cisco/ios/plugins/modules/ios_config.py future-import-boilerplate
test/support/network-integration/collections/ansible_collections/cisco/ios/plugins/modules/ios_config.py metaclass-boilerplate
test/support/network-integration/collections/ansible_collections/cisco/ios/plugins/modules/ios_config.py pep8:E501
test/support/network-integration/collections/ansible_collections/vyos/vyos/plugins/doc_fragments/vyos.py future-import-boilerplate
test/support/network-integration/collections/ansible_collections/vyos/vyos/plugins/doc_fragments/vyos.py metaclass-boilerplate
test/support/network-integration/collections/ansible_collections/vyos/vyos/plugins/module_utils/network/vyos/vyos.py future-import-boilerplate
test/support/network-integration/collections/ansible_collections/vyos/vyos/plugins/module_utils/network/vyos/vyos.py metaclass-boilerplate
test/support/network-integration/collections/ansible_collections/vyos/vyos/plugins/modules/vyos_command.py future-import-boilerplate
test/support/network-integration/collections/ansible_collections/vyos/vyos/plugins/modules/vyos_command.py metaclass-boilerplate
test/support/network-integration/collections/ansible_collections/vyos/vyos/plugins/modules/vyos_command.py pep8:E231
test/support/network-integration/collections/ansible_collections/vyos/vyos/plugins/modules/vyos_command.py pylint:blacklisted-name
test/support/network-integration/collections/ansible_collections/vyos/vyos/plugins/modules/vyos_config.py future-import-boilerplate
test/support/network-integration/collections/ansible_collections/vyos/vyos/plugins/modules/vyos_config.py metaclass-boilerplate
test/support/network-integration/collections/ansible_collections/vyos/vyos/plugins/modules/vyos_facts.py future-import-boilerplate
test/support/network-integration/collections/ansible_collections/vyos/vyos/plugins/modules/vyos_facts.py metaclass-boilerplate
test/support/network-integration/collections/ansible_collections/vyos/vyos/plugins/modules/vyos_logging.py future-import-boilerplate
test/support/network-integration/collections/ansible_collections/vyos/vyos/plugins/modules/vyos_logging.py metaclass-boilerplate
test/support/network-integration/collections/ansible_collections/vyos/vyos/plugins/modules/vyos_static_route.py future-import-boilerplate
test/support/network-integration/collections/ansible_collections/vyos/vyos/plugins/modules/vyos_static_route.py metaclass-boilerplate
test/units/config/manager/test_find_ini_config_file.py future-import-boilerplate
test/units/contrib/inventory/test_vmware_inventory.py future-import-boilerplate
test/units/contrib/inventory/test_vmware_inventory.py metaclass-boilerplate

@ -0,0 +1,40 @@
#
# Copyright 2018 Red Hat Inc.
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
from __future__ import absolute_import, division, print_function
__metaclass__ = type
from ansible_collections.ansible.netcommon.plugins.action.network import (
ActionModule as ActionNetworkModule,
)
class ActionModule(ActionNetworkModule):
def run(self, tmp=None, task_vars=None):
del tmp # tmp no longer has any effect
self._config_module = True
if self._play_context.connection.split(".")[-1] != "network_cli":
return {
"failed": True,
"msg": "Connection type %s is not valid for cli_config module"
% self._play_context.connection,
}
return super(ActionModule, self).run(task_vars=task_vars)

@ -0,0 +1,90 @@
# Copyright: (c) 2015, Ansible Inc,
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
import copy
from ansible.errors import AnsibleError
from ansible.plugins.action import ActionBase
from ansible.utils.display import Display
display = Display()
class ActionModule(ActionBase):
def run(self, tmp=None, task_vars=None):
del tmp # tmp no longer has any effect
result = {}
play_context = copy.deepcopy(self._play_context)
play_context.network_os = self._get_network_os(task_vars)
new_task = self._task.copy()
module = self._get_implementation_module(
play_context.network_os, self._task.action
)
if not module:
if self._task.args["fail_on_missing_module"]:
result["failed"] = True
else:
result["failed"] = False
result["msg"] = (
"Could not find implementation module %s for %s"
% (self._task.action, play_context.network_os)
)
return result
new_task.action = module
action = self._shared_loader_obj.action_loader.get(
play_context.network_os,
task=new_task,
connection=self._connection,
play_context=play_context,
loader=self._loader,
templar=self._templar,
shared_loader_obj=self._shared_loader_obj,
)
display.vvvv("Running implementation module %s" % module)
return action.run(task_vars=task_vars)
def _get_network_os(self, task_vars):
if "network_os" in self._task.args and self._task.args["network_os"]:
display.vvvv("Getting network OS from task argument")
network_os = self._task.args["network_os"]
elif self._play_context.network_os:
display.vvvv("Getting network OS from inventory")
network_os = self._play_context.network_os
elif (
"network_os" in task_vars.get("ansible_facts", {})
and task_vars["ansible_facts"]["network_os"]
):
display.vvvv("Getting network OS from fact")
network_os = task_vars["ansible_facts"]["network_os"]
else:
raise AnsibleError(
"ansible_network_os must be specified on this host to use platform agnostic modules"
)
return network_os
def _get_implementation_module(self, network_os, platform_agnostic_module):
module_name = (
network_os.split(".")[-1]
+ "_"
+ platform_agnostic_module.partition("_")[2]
)
if "." in network_os:
fqcn_module = ".".join(network_os.split(".")[0:-1])
implementation_module = fqcn_module + "." + module_name
else:
implementation_module = module_name
if implementation_module not in self._shared_loader_obj.module_loader:
implementation_module = None
return implementation_module
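# Illustrative trace (an assumed example added for clarity, not part of this
# change): how the dispatcher above resolves a platform-agnostic task onto a
# platform-specific module, assuming ansible_network_os is set to a fully
# qualified platform name such as "vyos.vyos.vyos":
#
#   network_os = "vyos.vyos.vyos"; platform_agnostic_module = "net_logging"
#   module_name           = "vyos" + "_" + "logging"   -> "vyos_logging"
#   fqcn_module           = "vyos.vyos"
#   implementation_module = "vyos.vyos.vyos_logging"
#
# The candidate is then checked against the module loader before being run;
# if it is missing, the task fails or not according to fail_on_missing_module.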

@ -0,0 +1,199 @@
# (c) 2018, Ansible Inc,
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
from __future__ import absolute_import, division, print_function
__metaclass__ = type
import os
import re
import uuid
import hashlib
from ansible.errors import AnsibleError
from ansible.module_utils._text import to_text, to_bytes
from ansible.module_utils.connection import Connection, ConnectionError
from ansible.plugins.action import ActionBase
from ansible.module_utils.six.moves.urllib.parse import urlsplit
from ansible.utils.display import Display
display = Display()
class ActionModule(ActionBase):
def run(self, tmp=None, task_vars=None):
socket_path = None
self._get_network_os(task_vars)
persistent_connection = self._play_context.connection.split(".")[-1]
result = super(ActionModule, self).run(task_vars=task_vars)
if persistent_connection != "network_cli":
# It is supported only with network_cli
result["failed"] = True
result["msg"] = (
"connection type %s is not valid for net_get module,"
" please use fully qualified name of network_cli connection type"
% self._play_context.connection
)
return result
try:
src = self._task.args["src"]
except KeyError as exc:
return {
"failed": True,
"msg": "missing required argument: %s" % exc,
}
# Get destination file if specified
dest = self._task.args.get("dest")
if dest is None:
dest = self._get_default_dest(src)
else:
dest = self._handle_dest_path(dest)
# Get proto
proto = self._task.args.get("protocol")
if proto is None:
proto = "scp"
if socket_path is None:
socket_path = self._connection.socket_path
conn = Connection(socket_path)
sock_timeout = conn.get_option("persistent_command_timeout")
try:
changed = self._handle_existing_file(
conn, src, dest, proto, sock_timeout
)
if changed is False:
result["changed"] = changed
result["destination"] = dest
return result
except Exception as exc:
result["msg"] = (
"Warning: %s idempotency check failed. Check dest" % exc
)
try:
conn.get_file(
source=src, destination=dest, proto=proto, timeout=sock_timeout
)
except Exception as exc:
result["failed"] = True
result["msg"] = "Exception received: %s" % exc
result["changed"] = changed
result["destination"] = dest
return result
def _handle_dest_path(self, dest):
working_path = self._get_working_path()
if os.path.isabs(dest) or urlsplit("dest").scheme:
dst = dest
else:
dst = self._loader.path_dwim_relative(working_path, "", dest)
return dst
def _get_src_filename_from_path(self, src_path):
filename_list = re.split("/|:", src_path)
return filename_list[-1]
def _get_default_dest(self, src_path):
dest_path = self._get_working_path()
src_fname = self._get_src_filename_from_path(src_path)
filename = "%s/%s" % (dest_path, src_fname)
return filename
def _handle_existing_file(self, conn, source, dest, proto, timeout):
"""
Determines whether the source and destination file match.
:return: False if source and dest both exist and have matching sha1 sums, True otherwise.
"""
if not os.path.exists(dest):
return True
cwd = self._loader.get_basedir()
filename = str(uuid.uuid4())
tmp_dest_file = os.path.join(cwd, filename)
try:
conn.get_file(
source=source,
destination=tmp_dest_file,
proto=proto,
timeout=timeout,
)
except ConnectionError as exc:
error = to_text(exc)
if error.endswith("No such file or directory"):
if os.path.exists(tmp_dest_file):
os.remove(tmp_dest_file)
return True
try:
with open(tmp_dest_file, "r") as f:
new_content = f.read()
with open(dest, "r") as f:
old_content = f.read()
except (IOError, OSError):
os.remove(tmp_dest_file)
raise
sha1 = hashlib.sha1()
old_content_b = to_bytes(old_content, errors="surrogate_or_strict")
sha1.update(old_content_b)
checksum_old = sha1.digest()
sha1 = hashlib.sha1()
new_content_b = to_bytes(new_content, errors="surrogate_or_strict")
sha1.update(new_content_b)
checksum_new = sha1.digest()
os.remove(tmp_dest_file)
if checksum_old == checksum_new:
return False
return True
def _get_working_path(self):
cwd = self._loader.get_basedir()
if self._task._role is not None:
cwd = self._task._role._role_path
return cwd
def _get_network_os(self, task_vars):
if "network_os" in self._task.args and self._task.args["network_os"]:
display.vvvv("Getting network OS from task argument")
network_os = self._task.args["network_os"]
elif self._play_context.network_os:
display.vvvv("Getting network OS from inventory")
network_os = self._play_context.network_os
elif (
"network_os" in task_vars.get("ansible_facts", {})
and task_vars["ansible_facts"]["network_os"]
):
display.vvvv("Getting network OS from fact")
network_os = task_vars["ansible_facts"]["network_os"]
else:
raise AnsibleError(
"ansible_network_os must be specified on this host"
)
return network_os

@ -0,0 +1,30 @@
# (c) 2017, Ansible Inc,
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
from __future__ import absolute_import, division, print_function
__metaclass__ = type
from ansible_collections.ansible.netcommon.plugins.action.net_base import (
ActionModule as _ActionModule,
)
class ActionModule(_ActionModule):
def run(self, tmp=None, task_vars=None):
result = super(ActionModule, self).run(tmp, task_vars)
del tmp # tmp no longer has any effect
return result

@ -0,0 +1,235 @@
# (c) 2018, Ansible Inc,
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
from __future__ import absolute_import, division, print_function
__metaclass__ = type
import os
import uuid
import hashlib
from ansible.errors import AnsibleError
from ansible.module_utils._text import to_text, to_bytes
from ansible.module_utils.connection import Connection, ConnectionError
from ansible.plugins.action import ActionBase
from ansible.module_utils.six.moves.urllib.parse import urlsplit
from ansible.utils.display import Display
display = Display()
class ActionModule(ActionBase):
def run(self, tmp=None, task_vars=None):
socket_path = None
network_os = self._get_network_os(task_vars).split(".")[-1]
persistent_connection = self._play_context.connection.split(".")[-1]
result = super(ActionModule, self).run(task_vars=task_vars)
if persistent_connection != "network_cli":
# It is supported only with network_cli
result["failed"] = True
result["msg"] = (
"connection type %s is not valid for net_put module,"
" please use fully qualified name of network_cli connection type"
% self._play_context.connection
)
return result
try:
src = self._task.args["src"]
except KeyError as exc:
return {
"failed": True,
"msg": "missing required argument: %s" % exc,
}
src_file_path_name = src
# Get destination file if specified
dest = self._task.args.get("dest")
# Get proto
proto = self._task.args.get("protocol")
if proto is None:
proto = "scp"
# Get mode if set
mode = self._task.args.get("mode")
if mode is None:
mode = "binary"
if mode == "text":
try:
self._handle_template(convert_data=False)
except ValueError as exc:
return dict(failed=True, msg=to_text(exc))
# Now that src holds the resolved template output, write it to disk in the current directory for scp
src = self._task.args.get("src")
filename = str(uuid.uuid4())
cwd = self._loader.get_basedir()
output_file = os.path.join(cwd, filename)
try:
with open(output_file, "wb") as f:
f.write(to_bytes(src, encoding="utf-8"))
except Exception:
os.remove(output_file)
raise
else:
try:
output_file = self._get_binary_src_file(src)
except ValueError as exc:
return dict(failed=True, msg=to_text(exc))
if socket_path is None:
socket_path = self._connection.socket_path
conn = Connection(socket_path)
sock_timeout = conn.get_option("persistent_command_timeout")
if dest is None:
dest = src_file_path_name
try:
changed = self._handle_existing_file(
conn, output_file, dest, proto, sock_timeout
)
if changed is False:
result["changed"] = changed
result["destination"] = dest
return result
except Exception as exc:
result["msg"] = (
"Warning: %s idempotency check failed. Check dest" % exc
)
try:
conn.copy_file(
source=output_file,
destination=dest,
proto=proto,
timeout=sock_timeout,
)
except Exception as exc:
if to_text(exc) == "No response from server":
if network_os == "iosxr":
# IOSXR sometimes closes socket prematurely after completion
# of file transfer
result[
"msg"
] = "Warning: iosxr scp server pre close issue. Please check dest"
else:
result["failed"] = True
result["msg"] = "Exception received: %s" % exc
if mode == "text":
# Clean up tmp file expanded with ansible vars
os.remove(output_file)
result["changed"] = changed
result["destination"] = dest
return result
def _handle_existing_file(self, conn, source, dest, proto, timeout):
"""
Determines whether the source and destination file match.
:return: False if source and dest both exist and have matching sha1 sums, True otherwise.
"""
cwd = self._loader.get_basedir()
filename = str(uuid.uuid4())
tmp_source_file = os.path.join(cwd, filename)
try:
conn.get_file(
source=dest,
destination=tmp_source_file,
proto=proto,
timeout=timeout,
)
except ConnectionError as exc:
error = to_text(exc)
if error.endswith("No such file or directory"):
if os.path.exists(tmp_source_file):
os.remove(tmp_source_file)
return True
try:
with open(source, "r") as f:
new_content = f.read()
with open(tmp_source_file, "r") as f:
old_content = f.read()
except (IOError, OSError):
os.remove(tmp_source_file)
raise
sha1 = hashlib.sha1()
old_content_b = to_bytes(old_content, errors="surrogate_or_strict")
sha1.update(old_content_b)
checksum_old = sha1.digest()
sha1 = hashlib.sha1()
new_content_b = to_bytes(new_content, errors="surrogate_or_strict")
sha1.update(new_content_b)
checksum_new = sha1.digest()
os.remove(tmp_source_file)
if checksum_old == checksum_new:
return False
return True
def _get_binary_src_file(self, src):
working_path = self._get_working_path()
if os.path.isabs(src) or urlsplit(src).scheme:
source = src
else:
source = self._loader.path_dwim_relative(
working_path, "templates", src
)
if not source:
source = self._loader.path_dwim_relative(working_path, src)
if not os.path.exists(source):
raise ValueError("path specified in src not found")
return source
def _get_working_path(self):
cwd = self._loader.get_basedir()
if self._task._role is not None:
cwd = self._task._role._role_path
return cwd
def _get_network_os(self, task_vars):
if "network_os" in self._task.args and self._task.args["network_os"]:
display.vvvv("Getting network OS from task argument")
network_os = self._task.args["network_os"]
elif self._play_context.network_os:
display.vvvv("Getting network OS from inventory")
network_os = self._play_context.network_os
elif (
"network_os" in task_vars.get("ansible_facts", {})
and task_vars["ansible_facts"]["network_os"]
):
display.vvvv("Getting network OS from fact")
network_os = task_vars["ansible_facts"]["network_os"]
else:
raise AnsibleError(
"ansible_network_os must be specified on this host"
)
return network_os
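
A minimal usage sketch for the text-mode path of the net_put action above, assuming a host that already uses the ansible.netcommon.network_cli connection; the option names (src, dest, protocol, mode) are the ones read from self._task.args in the plugin, while the file names are hypothetical:

- name: push a templated config to the device (sketch, hypothetical file names)
  ansible.netcommon.net_put:
    src: hypothetical_template.cfg   # expanded by _handle_template when mode is text
    dest: flash:/hypothetical.cfg
    protocol: scp
    mode: text

On a second run the _handle_existing_file helper pulls the remote copy back, compares sha1 digests and reports changed=false when the contents match.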

@ -0,0 +1,31 @@
# (c) 2017, Ansible Inc,
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
from __future__ import absolute_import, division, print_function
__metaclass__ = type
from ansible_collections.ansible.netcommon.plugins.action.net_base import (
ActionModule as _ActionModule,
)
class ActionModule(_ActionModule):
def run(self, tmp=None, task_vars=None):
result = super(ActionModule, self).run(tmp, task_vars)
del tmp # tmp no longer has any effect
return result

@ -0,0 +1,209 @@
#
# (c) 2018 Red Hat Inc.
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
from __future__ import absolute_import, division, print_function
__metaclass__ = type
import os
import time
import re
from ansible.errors import AnsibleError
from ansible.module_utils._text import to_text, to_bytes
from ansible.module_utils.six.moves.urllib.parse import urlsplit
from ansible.plugins.action.normal import ActionModule as _ActionModule
from ansible.utils.display import Display
display = Display()
PRIVATE_KEYS_RE = re.compile("__.+__")
class ActionModule(_ActionModule):
def run(self, task_vars=None):
config_module = hasattr(self, "_config_module") and self._config_module
if config_module and self._task.args.get("src"):
try:
self._handle_src_option()
except AnsibleError as e:
return {"failed": True, "msg": e.message, "changed": False}
result = super(ActionModule, self).run(task_vars=task_vars)
if (
config_module
and self._task.args.get("backup")
and not result.get("failed")
):
self._handle_backup_option(result, task_vars)
return result
def _handle_backup_option(self, result, task_vars):
filename = None
backup_path = None
try:
content = result["__backup__"]
except KeyError:
raise AnsibleError("Failed while reading configuration backup")
backup_options = self._task.args.get("backup_options")
if backup_options:
filename = backup_options.get("filename")
backup_path = backup_options.get("dir_path")
if not backup_path:
cwd = self._get_working_path()
backup_path = os.path.join(cwd, "backup")
if not filename:
tstamp = time.strftime(
"%Y-%m-%d@%H:%M:%S", time.localtime(time.time())
)
filename = "%s_config.%s" % (
task_vars["inventory_hostname"],
tstamp,
)
dest = os.path.join(backup_path, filename)
backup_path = os.path.expanduser(
os.path.expandvars(
to_bytes(backup_path, errors="surrogate_or_strict")
)
)
if not os.path.exists(backup_path):
os.makedirs(backup_path)
new_task = self._task.copy()
for item in self._task.args:
if not item.startswith("_"):
new_task.args.pop(item, None)
new_task.args.update(dict(content=content, dest=dest))
copy_action = self._shared_loader_obj.action_loader.get(
"copy",
task=new_task,
connection=self._connection,
play_context=self._play_context,
loader=self._loader,
templar=self._templar,
shared_loader_obj=self._shared_loader_obj,
)
copy_result = copy_action.run(task_vars=task_vars)
if copy_result.get("failed"):
result["failed"] = copy_result["failed"]
result["msg"] = copy_result.get("msg")
return
result["backup_path"] = dest
if copy_result.get("changed", False):
result["changed"] = copy_result["changed"]
if backup_options and backup_options.get("filename"):
result["date"] = time.strftime(
"%Y-%m-%d",
time.gmtime(os.stat(result["backup_path"]).st_ctime),
)
result["time"] = time.strftime(
"%H:%M:%S",
time.gmtime(os.stat(result["backup_path"]).st_ctime),
)
else:
result["date"] = tstamp.split("@")[0]
result["time"] = tstamp.split("@")[1]
result["shortname"] = result["backup_path"][::-1].split(".", 1)[1][
::-1
]
result["filename"] = result["backup_path"].split("/")[-1]
# strip out any keys that have two leading and two trailing
# underscore characters
for key in list(result.keys()):
if PRIVATE_KEYS_RE.match(key):
del result[key]
def _get_working_path(self):
cwd = self._loader.get_basedir()
if self._task._role is not None:
cwd = self._task._role._role_path
return cwd
def _handle_src_option(self, convert_data=True):
src = self._task.args.get("src")
working_path = self._get_working_path()
if os.path.isabs(src) or urlsplit(src).scheme:
source = src
else:
source = self._loader.path_dwim_relative(
working_path, "templates", src
)
if not source:
source = self._loader.path_dwim_relative(working_path, src)
if not os.path.exists(source):
raise AnsibleError("path specified in src not found")
try:
with open(source, "r") as f:
template_data = to_text(f.read())
except IOError as e:
raise AnsibleError(
"unable to load src file {0}, I/O error({1}): {2}".format(
source, e.errno, e.strerror
)
)
# Create a template search path in the following order:
# [working_path, self_role_path, dependent_role_paths, dirname(source)]
searchpath = [working_path]
if self._task._role is not None:
searchpath.append(self._task._role._role_path)
if hasattr(self._task, "_block"):
dep_chain = self._task._block.get_dep_chain()
if dep_chain is not None:
for role in dep_chain:
searchpath.append(role._role_path)
searchpath.append(os.path.dirname(source))
with self._templar.set_temporary_context(searchpath=searchpath):
self._task.args["src"] = self._templar.template(
template_data, convert_data=convert_data
)
def _get_network_os(self, task_vars):
if "network_os" in self._task.args and self._task.args["network_os"]:
display.vvvv("Getting network OS from task argument")
network_os = self._task.args["network_os"]
elif self._play_context.network_os:
display.vvvv("Getting network OS from inventory")
network_os = self._play_context.network_os
elif (
"network_os" in task_vars.get("ansible_facts", {})
and task_vars["ansible_facts"]["network_os"]
):
display.vvvv("Getting network OS from fact")
network_os = task_vars["ansible_facts"]["network_os"]
else:
raise AnsibleError(
"ansible_network_os must be specified on this host"
)
return network_os
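
A hedged illustration of the backup handling in the action plugin above; backup and the backup_options keys (filename, dir_path) are exactly what _handle_backup_option reads, while the module, directory and file names here are placeholders:

- name: back up the running configuration (sketch, placeholder paths)
  cisco.ios.ios_config:
    backup: yes
    backup_options:
      filename: example_backup.cfg
      dir_path: /tmp/example_backups

When backup_options is omitted, the plugin falls back to a backup/ directory under the working path and a <inventory_hostname>_config.<timestamp> file name, then spawns the copy action and records backup_path, date and time in the result.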

@ -0,0 +1,42 @@
# -*- coding: utf-8 -*-
# Copyright: (c) 2018, Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
DOCUMENTATION = """become: enable
short_description: Switch to elevated permissions on a network device
description:
- This become plugin allows elevated permissions on a remote network device.
author: ansible (@core)
options:
become_pass:
description: password
ini:
- section: enable_become_plugin
key: password
vars:
- name: ansible_become_password
- name: ansible_become_pass
- name: ansible_enable_pass
env:
- name: ANSIBLE_BECOME_PASS
- name: ANSIBLE_ENABLE_PASS
notes:
- enable is really implemented in the network connection handler and as such can only
be used with network connections.
- This plugin ignores the 'become_exe' and 'become_user' settings as it uses an API
and not an executable.
"""
from ansible.plugins.become import BecomeBase
class BecomeModule(BecomeBase):
name = "ansible.netcommon.enable"
def build_become_command(self, cmd, shell):
# enable is implemented inside the network connection plugins
return cmd
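
A short sketch of driving the enable become plugin above from host or group variables; the variable names come straight from its DOCUMENTATION block and the vaulted password reference is a placeholder:

ansible_become: yes
ansible_become_method: ansible.netcommon.enable
ansible_become_password: "{{ vault_enable_secret }}"   # placeholder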

@ -0,0 +1,324 @@
# (c) 2018 Red Hat Inc.
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
DOCUMENTATION = """author: Ansible Networking Team
connection: httpapi
short_description: Use httpapi to run command on network appliances
description:
- This connection plugin provides a connection to remote devices over an HTTP(S)-based
API.
options:
host:
description:
- Specifies the remote device FQDN or IP address to establish the HTTP(S) connection
to.
default: inventory_hostname
vars:
- name: ansible_host
port:
type: int
description:
- Specifies the port on the remote device that listens for connections when establishing
the HTTP(S) connection.
- When unspecified, will pick 80 or 443 based on the value of use_ssl.
ini:
- section: defaults
key: remote_port
env:
- name: ANSIBLE_REMOTE_PORT
vars:
- name: ansible_httpapi_port
network_os:
description:
- Configures the device platform network operating system. This value is used
to load the correct httpapi plugin to communicate with the remote device
vars:
- name: ansible_network_os
remote_user:
description:
- The username used to authenticate to the remote device when the API connection
is first established. If the remote_user is not specified, the connection will
use the username of the logged in user.
- Can be configured from the CLI via the C(--user) or C(-u) options.
ini:
- section: defaults
key: remote_user
env:
- name: ANSIBLE_REMOTE_USER
vars:
- name: ansible_user
password:
description:
- Configures the user password used to authenticate to the remote device when
needed for the device API.
vars:
- name: ansible_password
- name: ansible_httpapi_pass
- name: ansible_httpapi_password
use_ssl:
type: boolean
description:
- Whether to connect using SSL (HTTPS) or not (HTTP).
default: false
vars:
- name: ansible_httpapi_use_ssl
validate_certs:
type: boolean
description:
- Whether to validate SSL certificates
default: true
vars:
- name: ansible_httpapi_validate_certs
use_proxy:
type: boolean
description:
- Whether to use https_proxy for requests.
default: true
vars:
- name: ansible_httpapi_use_proxy
become:
type: boolean
description:
- The become option will instruct the CLI session to attempt privilege escalation
on platforms that support it. Normally this means transitioning from user mode
to C(enable) mode in the CLI session. If become is set to True and the remote
device does not support privilege escalation or the privilege has already been
elevated, then this option is silently ignored.
- Can be configured from the CLI via the C(--become) or C(-b) options.
default: false
ini:
- section: privilege_escalation
key: become
env:
- name: ANSIBLE_BECOME
vars:
- name: ansible_become
become_method:
description:
- This option allows the become method to be specified for handling privilege
escalation. Typically the become_method value is set to C(enable) but could
be defined as other values.
default: sudo
ini:
- section: privilege_escalation
key: become_method
env:
- name: ANSIBLE_BECOME_METHOD
vars:
- name: ansible_become_method
persistent_connect_timeout:
type: int
description:
- Configures, in seconds, the amount of time to wait when trying to initially
establish a persistent connection. If this value expires before the connection
to the remote device is completed, the connection will fail.
default: 30
ini:
- section: persistent_connection
key: connect_timeout
env:
- name: ANSIBLE_PERSISTENT_CONNECT_TIMEOUT
vars:
- name: ansible_connect_timeout
persistent_command_timeout:
type: int
description:
- Configures, in seconds, the amount of time to wait for a command to return from
the remote device. If this timer is exceeded before the command returns, the
connection plugin will raise an exception and close.
default: 30
ini:
- section: persistent_connection
key: command_timeout
env:
- name: ANSIBLE_PERSISTENT_COMMAND_TIMEOUT
vars:
- name: ansible_command_timeout
persistent_log_messages:
type: boolean
description:
- This flag will enable logging the command executed and response received from the
target device in the ansible log file. For this option to work, the 'log_path' ansible
configuration option is required to be set to a file path with write access.
- Be sure to fully understand the security implications of enabling this option
as it could create a security vulnerability by logging sensitive information
in the log file.
default: false
ini:
- section: persistent_connection
key: log_messages
env:
- name: ANSIBLE_PERSISTENT_LOG_MESSAGES
vars:
- name: ansible_persistent_log_messages
"""
from io import BytesIO
from ansible.errors import AnsibleConnectionFailure
from ansible.module_utils._text import to_bytes
from ansible.module_utils.six import PY3
from ansible.module_utils.six.moves import cPickle
from ansible.module_utils.six.moves.urllib.error import HTTPError, URLError
from ansible.module_utils.urls import open_url
from ansible.playbook.play_context import PlayContext
from ansible.plugins.loader import httpapi_loader
from ansible.plugins.connection import NetworkConnectionBase, ensure_connect
class Connection(NetworkConnectionBase):
"""Network API connection"""
transport = "ansible.netcommon.httpapi"
has_pipelining = True
def __init__(self, play_context, new_stdin, *args, **kwargs):
super(Connection, self).__init__(
play_context, new_stdin, *args, **kwargs
)
self._url = None
self._auth = None
if self._network_os:
self.httpapi = httpapi_loader.get(self._network_os, self)
if self.httpapi:
self._sub_plugin = {
"type": "httpapi",
"name": self.httpapi._load_name,
"obj": self.httpapi,
}
self.queue_message(
"vvvv",
"loaded API plugin %s from path %s for network_os %s"
% (
self.httpapi._load_name,
self.httpapi._original_path,
self._network_os,
),
)
else:
raise AnsibleConnectionFailure(
"unable to load API plugin for network_os %s"
% self._network_os
)
else:
raise AnsibleConnectionFailure(
"Unable to automatically determine host network os. Please "
"manually configure ansible_network_os value for this host"
)
self.queue_message("log", "network_os is set to %s" % self._network_os)
def update_play_context(self, pc_data):
"""Updates the play context information for the connection"""
pc_data = to_bytes(pc_data)
if PY3:
pc_data = cPickle.loads(pc_data, encoding="bytes")
else:
pc_data = cPickle.loads(pc_data)
play_context = PlayContext()
play_context.deserialize(pc_data)
self.queue_message("vvvv", "updating play_context for connection")
if self._play_context.become ^ play_context.become:
self.set_become(play_context)
if play_context.become is True:
self.queue_message("vvvv", "authorizing connection")
else:
self.queue_message("vvvv", "deauthorizing connection")
self._play_context = play_context
def _connect(self):
if not self.connected:
protocol = "https" if self.get_option("use_ssl") else "http"
host = self.get_option("host")
port = self.get_option("port") or (
443 if protocol == "https" else 80
)
self._url = "%s://%s:%s" % (protocol, host, port)
self.queue_message(
"vvv",
"ESTABLISH HTTP(S) CONNECT FOR USER: %s TO %s"
% (self._play_context.remote_user, self._url),
)
self.httpapi.set_become(self._play_context)
self._connected = True
self.httpapi.login(
self.get_option("remote_user"), self.get_option("password")
)
def close(self):
"""
Close the active session to the device
"""
# only close the connection if it's connected.
if self._connected:
self.queue_message("vvvv", "closing http(s) connection to device")
self.logout()
super(Connection, self).close()
@ensure_connect
def send(self, path, data, **kwargs):
"""
Sends the command to the device over api
"""
url_kwargs = dict(
timeout=self.get_option("persistent_command_timeout"),
validate_certs=self.get_option("validate_certs"),
use_proxy=self.get_option("use_proxy"),
headers={},
)
url_kwargs.update(kwargs)
if self._auth:
# Avoid modifying passed-in headers
headers = dict(kwargs.get("headers", {}))
headers.update(self._auth)
url_kwargs["headers"] = headers
else:
url_kwargs["force_basic_auth"] = True
url_kwargs["url_username"] = self.get_option("remote_user")
url_kwargs["url_password"] = self.get_option("password")
try:
url = self._url + path
self._log_messages(
"send url '%s' with data '%s' and kwargs '%s'"
% (url, data, url_kwargs)
)
response = open_url(url, data=data, **url_kwargs)
except HTTPError as exc:
is_handled = self.handle_httperror(exc)
if is_handled is True:
return self.send(path, data, **kwargs)
elif is_handled is False:
raise
else:
response = is_handled
except URLError as exc:
raise AnsibleConnectionFailure(
"Could not connect to {0}: {1}".format(
self._url + path, exc.reason
)
)
response_buffer = BytesIO()
resp_data = response.read()
self._log_messages("received response: '%s'" % resp_data)
response_buffer.write(resp_data)
# Try to assign a new auth token if one is given
self._auth = self.update_auth(response, response_buffer) or self._auth
response_buffer.seek(0)
return response, response_buffer
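
A hedged group_vars sketch for the httpapi connection plugin above; each variable name maps to a vars entry in its DOCUMENTATION, and the platform, credentials and port values are placeholders:

ansible_connection: ansible.netcommon.httpapi
ansible_network_os: exampleos              # placeholder platform
ansible_httpapi_use_ssl: yes
ansible_httpapi_validate_certs: no
ansible_httpapi_port: 8443                 # otherwise 80/443 is picked from use_ssl
ansible_user: admin                        # placeholder credentials
ansible_httpapi_password: "{{ vault_api_password }}"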

@ -0,0 +1,404 @@
# (c) 2016 Red Hat Inc.
# (c) 2017 Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
DOCUMENTATION = """author: Ansible Networking Team
connection: netconf
short_description: Provides a persistent connection using the netconf protocol
description:
- This connection plugin provides a connection to remote devices over the SSH NETCONF
subsystem. This connection plugin is typically used by network devices for sending
and receiving RPC calls over NETCONF.
- Note this connection plugin requires ncclient to be installed on the local Ansible
controller.
requirements:
- ncclient
options:
host:
description:
- Specifies the remote device FQDN or IP address to establish the SSH connection
to.
default: inventory_hostname
vars:
- name: ansible_host
port:
type: int
description:
- Specifies the port on the remote device that listens for connections when establishing
the SSH connection.
default: 830
ini:
- section: defaults
key: remote_port
env:
- name: ANSIBLE_REMOTE_PORT
vars:
- name: ansible_port
network_os:
description:
- Configures the device platform network operating system. This value is used
to load a device specific netconf plugin. If this option is not configured
(or set to C(auto)), then Ansible will attempt to guess the correct network_os
to use. If it cannot guess a network_os correctly it will use C(default).
vars:
- name: ansible_network_os
remote_user:
description:
- The username used to authenticate to the remote device when the SSH connection
is first established. If the remote_user is not specified, the connection will
use the username of the logged in user.
- Can be configured from the CLI via the C(--user) or C(-u) options.
ini:
- section: defaults
key: remote_user
env:
- name: ANSIBLE_REMOTE_USER
vars:
- name: ansible_user
password:
description:
- Configures the user password used to authenticate to the remote device when
first establishing the SSH connection.
vars:
- name: ansible_password
- name: ansible_ssh_pass
- name: ansible_ssh_password
- name: ansible_netconf_password
private_key_file:
description:
- The private SSH key or certificate file used to authenticate to the remote device
when first establishing the SSH connection.
ini:
- section: defaults
key: private_key_file
env:
- name: ANSIBLE_PRIVATE_KEY_FILE
vars:
- name: ansible_private_key_file
look_for_keys:
default: true
description:
- Enables looking for ssh keys in the usual locations (e.g. :file:`~/.ssh/id_*`).
env:
- name: ANSIBLE_PARAMIKO_LOOK_FOR_KEYS
ini:
- section: paramiko_connection
key: look_for_keys
type: boolean
host_key_checking:
description: Set this to "False" if you want to avoid host key checking by the
underlying tools Ansible uses to connect to the host
type: boolean
default: true
env:
- name: ANSIBLE_HOST_KEY_CHECKING
- name: ANSIBLE_SSH_HOST_KEY_CHECKING
- name: ANSIBLE_NETCONF_HOST_KEY_CHECKING
ini:
- section: defaults
key: host_key_checking
- section: paramiko_connection
key: host_key_checking
vars:
- name: ansible_host_key_checking
- name: ansible_ssh_host_key_checking
- name: ansible_netconf_host_key_checking
persistent_connect_timeout:
type: int
description:
- Configures, in seconds, the amount of time to wait when trying to initially
establish a persistent connection. If this value expires before the connection
to the remote device is completed, the connection will fail.
default: 30
ini:
- section: persistent_connection
key: connect_timeout
env:
- name: ANSIBLE_PERSISTENT_CONNECT_TIMEOUT
vars:
- name: ansible_connect_timeout
persistent_command_timeout:
type: int
description:
- Configures, in seconds, the amount of time to wait for a command to return from
the remote device. If this timer is exceeded before the command returns, the
connection plugin will raise an exception and close.
default: 30
ini:
- section: persistent_connection
key: command_timeout
env:
- name: ANSIBLE_PERSISTENT_COMMAND_TIMEOUT
vars:
- name: ansible_command_timeout
netconf_ssh_config:
description:
- This variable is used to enable a bastion/jump host with the netconf connection. If
set to True, the bastion/jump host ssh settings should be present in the ~/.ssh/config
file; alternatively it can be set to a custom ssh configuration file path from which to
read the bastion/jump host settings.
ini:
- section: netconf_connection
key: ssh_config
version_added: '2.7'
env:
- name: ANSIBLE_NETCONF_SSH_CONFIG
vars:
- name: ansible_netconf_ssh_config
version_added: '2.7'
persistent_log_messages:
type: boolean
description:
- This flag will enable logging the command executed and response received from the
target device in the ansible log file. For this option to work, the 'log_path' ansible
configuration option is required to be set to a file path with write access.
- Be sure to fully understand the security implications of enabling this option
as it could create a security vulnerability by logging sensitive information
in the log file.
default: false
ini:
- section: persistent_connection
key: log_messages
env:
- name: ANSIBLE_PERSISTENT_LOG_MESSAGES
vars:
- name: ansible_persistent_log_messages
"""
import os
import logging
import json
from ansible.errors import AnsibleConnectionFailure, AnsibleError
from ansible.module_utils._text import to_bytes, to_native, to_text
from ansible.module_utils.basic import missing_required_lib
from ansible.module_utils.parsing.convert_bool import (
BOOLEANS_TRUE,
BOOLEANS_FALSE,
)
from ansible.plugins.loader import netconf_loader
from ansible.plugins.connection import NetworkConnectionBase, ensure_connect
try:
from ncclient import manager
from ncclient.operations import RPCError
from ncclient.transport.errors import SSHUnknownHostError
from ncclient.xml_ import to_ele, to_xml
HAS_NCCLIENT = True
NCCLIENT_IMP_ERR = None
except (
ImportError,
AttributeError,
) as err: # paramiko and gssapi are incompatible and raise AttributeError not ImportError
HAS_NCCLIENT = False
NCCLIENT_IMP_ERR = err
logging.getLogger("ncclient").setLevel(logging.INFO)
class Connection(NetworkConnectionBase):
"""NetConf connections"""
transport = "ansible.netcommon.netconf"
has_pipelining = False
def __init__(self, play_context, new_stdin, *args, **kwargs):
super(Connection, self).__init__(
play_context, new_stdin, *args, **kwargs
)
# If network_os is not specified then set the network os to auto
# This will be used to trigger the use of guess_network_os when connecting.
self._network_os = self._network_os or "auto"
self.netconf = netconf_loader.get(self._network_os, self)
if self.netconf:
self._sub_plugin = {
"type": "netconf",
"name": self.netconf._load_name,
"obj": self.netconf,
}
self.queue_message(
"vvvv",
"loaded netconf plugin %s from path %s for network_os %s"
% (
self.netconf._load_name,
self.netconf._original_path,
self._network_os,
),
)
else:
self.netconf = netconf_loader.get("default", self)
self._sub_plugin = {
"type": "netconf",
"name": "default",
"obj": self.netconf,
}
self.queue_message(
"display",
"unable to load netconf plugin for network_os %s, falling back to default plugin"
% self._network_os,
)
self.queue_message("log", "network_os is set to %s" % self._network_os)
self._manager = None
self.key_filename = None
self._ssh_config = None
def exec_command(self, cmd, in_data=None, sudoable=True):
"""Sends the request to the node and returns the reply
The method accepts two forms of request. The first form is as a byte
string that represents the xml string to be sent over the netconf session.
The second form is a json-rpc (2.0) byte string.
"""
if self._manager:
# to_ele operates on native strings
request = to_ele(to_native(cmd, errors="surrogate_or_strict"))
if request is None:
return "unable to parse request"
try:
reply = self._manager.rpc(request)
except RPCError as exc:
error = self.internal_error(
data=to_text(to_xml(exc.xml), errors="surrogate_or_strict")
)
return json.dumps(error)
return reply.data_xml
else:
return super(Connection, self).exec_command(cmd, in_data, sudoable)
@property
@ensure_connect
def manager(self):
return self._manager
def _connect(self):
if not HAS_NCCLIENT:
raise AnsibleError(
"%s: %s"
% (
missing_required_lib("ncclient"),
to_native(NCCLIENT_IMP_ERR),
)
)
self.queue_message("log", "ssh connection done, starting ncclient")
allow_agent = True
if self._play_context.password is not None:
allow_agent = False
setattr(self._play_context, "allow_agent", allow_agent)
self.key_filename = (
self._play_context.private_key_file
or self.get_option("private_key_file")
)
if self.key_filename:
self.key_filename = str(os.path.expanduser(self.key_filename))
self._ssh_config = self.get_option("netconf_ssh_config")
if self._ssh_config in BOOLEANS_TRUE:
self._ssh_config = True
elif self._ssh_config in BOOLEANS_FALSE:
self._ssh_config = None
# Try to guess the network_os if the network_os is set to auto
if self._network_os == "auto":
for cls in netconf_loader.all(class_only=True):
network_os = cls.guess_network_os(self)
if network_os:
self.queue_message(
"vvv", "discovered network_os %s" % network_os
)
self._network_os = network_os
# If we have tried to detect the network_os but were unable to i.e. network_os is still 'auto'
# then use default as the network_os
if self._network_os == "auto":
# Network os not discovered. Set it to default
self.queue_message(
"vvv",
"Unable to discover network_os. Falling back to default.",
)
self._network_os = "default"
try:
ncclient_device_handler = self.netconf.get_option(
"ncclient_device_handler"
)
except KeyError:
ncclient_device_handler = "default"
self.queue_message(
"vvv",
"identified ncclient device handler: %s."
% ncclient_device_handler,
)
device_params = {"name": ncclient_device_handler}
try:
port = self._play_context.port or 830
self.queue_message(
"vvv",
"ESTABLISH NETCONF SSH CONNECTION FOR USER: %s on PORT %s TO %s WITH SSH_CONFIG = %s"
% (
self._play_context.remote_user,
port,
self._play_context.remote_addr,
self._ssh_config,
),
)
self._manager = manager.connect(
host=self._play_context.remote_addr,
port=port,
username=self._play_context.remote_user,
password=self._play_context.password,
key_filename=self.key_filename,
hostkey_verify=self.get_option("host_key_checking"),
look_for_keys=self.get_option("look_for_keys"),
device_params=device_params,
allow_agent=self._play_context.allow_agent,
timeout=self.get_option("persistent_connect_timeout"),
ssh_config=self._ssh_config,
)
self._manager._timeout = self.get_option(
"persistent_command_timeout"
)
except SSHUnknownHostError as exc:
raise AnsibleConnectionFailure(to_native(exc))
except ImportError:
raise AnsibleError(
"connection=netconf is not supported on {0}".format(
self._network_os
)
)
if not self._manager.connected:
return 1, b"", b"not connected"
self.queue_message(
"log", "ncclient manager object created successfully"
)
self._connected = True
super(Connection, self)._connect()
return (
0,
to_bytes(self._manager.session_id, errors="surrogate_or_strict"),
b"",
)
def close(self):
if self._manager:
self._manager.close_session()
super(Connection, self).close()
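
A hedged inventory sketch for the netconf connection plugin above; the variable names match its DOCUMENTATION and the values, including the ssh config path, are placeholders:

ansible_connection: ansible.netcommon.netconf
ansible_network_os: default                # or auto, to let the plugin guess
ansible_port: 830
ansible_user: admin                        # placeholder credentials
ansible_netconf_password: "{{ vault_netconf_password }}"
ansible_netconf_ssh_config: ~/.ssh/jump_host_config   # placeholder path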

@ -0,0 +1,924 @@
# (c) 2016 Red Hat Inc.
# (c) 2017 Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
DOCUMENTATION = """author: Ansible Networking Team
connection: network_cli
short_description: Use network_cli to run command on network appliances
description:
- This connection plugin provides a connection to remote devices over SSH and
implements a CLI shell. This connection plugin is typically used by network devices
for sending and receiving CLI commands.
options:
host:
description:
- Specifies the remote device FQDN or IP address to establish the SSH connection
to.
default: inventory_hostname
vars:
- name: ansible_host
port:
type: int
description:
- Specifies the port on the remote device that listens for connections when establishing
the SSH connection.
default: 22
ini:
- section: defaults
key: remote_port
env:
- name: ANSIBLE_REMOTE_PORT
vars:
- name: ansible_port
network_os:
description:
- Configures the device platform network operating system. This value is used
to load the correct terminal and cliconf plugins to communicate with the remote
device.
vars:
- name: ansible_network_os
remote_user:
description:
- The username used to authenticate to the remote device when the SSH connection
is first established. If the remote_user is not specified, the connection will
use the username of the logged in user.
- Can be configured from the CLI via the C(--user) or C(-u) options.
ini:
- section: defaults
key: remote_user
env:
- name: ANSIBLE_REMOTE_USER
vars:
- name: ansible_user
password:
description:
- Configures the user password used to authenticate to the remote device when
first establishing the SSH connection.
vars:
- name: ansible_password
- name: ansible_ssh_pass
- name: ansible_ssh_password
private_key_file:
description:
- The private SSH key or certificate file used to authenticate to the remote device
when first establishing the SSH connection.
ini:
- section: defaults
key: private_key_file
env:
- name: ANSIBLE_PRIVATE_KEY_FILE
vars:
- name: ansible_private_key_file
become:
type: boolean
description:
- The become option will instruct the CLI session to attempt privilege escalation
on platforms that support it. Normally this means transitioning from user mode
to C(enable) mode in the CLI session. If become is set to True and the remote
device does not support privilege escalation or the privilege has already been
elevated, then this option is silently ignored.
- Can be configured from the CLI via the C(--become) or C(-b) options.
default: false
ini:
- section: privilege_escalation
key: become
env:
- name: ANSIBLE_BECOME
vars:
- name: ansible_become
become_method:
description:
- This option allows the become method to be specified for handling privilege
escalation. Typically the become_method value is set to C(enable) but could
be defined as other values.
default: sudo
ini:
- section: privilege_escalation
key: become_method
env:
- name: ANSIBLE_BECOME_METHOD
vars:
- name: ansible_become_method
host_key_auto_add:
type: boolean
description:
- By default, Ansible will prompt the user before adding SSH keys to the known
hosts file. Since persistent connections such as network_cli run in background
processes, the user will never be prompted. By enabling this option, unknown
host keys will automatically be added to the known hosts file.
- Be sure to fully understand the security implications of enabling this option
on production systems as it could create a security vulnerability.
default: false
ini:
- section: paramiko_connection
key: host_key_auto_add
env:
- name: ANSIBLE_HOST_KEY_AUTO_ADD
persistent_connect_timeout:
type: int
description:
- Configures, in seconds, the amount of time to wait when trying to initially
establish a persistent connection. If this value expires before the connection
to the remote device is completed, the connection will fail.
default: 30
ini:
- section: persistent_connection
key: connect_timeout
env:
- name: ANSIBLE_PERSISTENT_CONNECT_TIMEOUT
vars:
- name: ansible_connect_timeout
persistent_command_timeout:
type: int
description:
- Configures, in seconds, the amount of time to wait for a command to return from
the remote device. If this timer is exceeded before the command returns, the
connection plugin will raise an exception and close.
default: 30
ini:
- section: persistent_connection
key: command_timeout
env:
- name: ANSIBLE_PERSISTENT_COMMAND_TIMEOUT
vars:
- name: ansible_command_timeout
persistent_buffer_read_timeout:
type: float
description:
- Configures, in seconds, the amount of time to wait for the data to be read from
the Paramiko channel after the command prompt is matched. This timeout value ensures
that the command prompt matched is correct and there is no more data left to be
received from the remote host.
default: 0.1
ini:
- section: persistent_connection
key: buffer_read_timeout
env:
- name: ANSIBLE_PERSISTENT_BUFFER_READ_TIMEOUT
vars:
- name: ansible_buffer_read_timeout
persistent_log_messages:
type: boolean
description:
- This flag will enable logging the command executed and response received from the
target device in the ansible log file. For this option to work, the 'log_path' ansible
configuration option is required to be set to a file path with write access.
- Be sure to fully understand the security implications of enabling this option
as it could create a security vulnerability by logging sensitive information
in the log file.
default: false
ini:
- section: persistent_connection
key: log_messages
env:
- name: ANSIBLE_PERSISTENT_LOG_MESSAGES
vars:
- name: ansible_persistent_log_messages
terminal_stdout_re:
type: list
elements: dict
description:
- A single regex pattern or a sequence of patterns along with optional flags to
match the command prompt from the received response chunk. This option accepts
C(pattern) and C(flags) keys. The value of C(pattern) is a python regex pattern
to match the response and the value of C(flags) is the value accepted by I(flags)
argument of I(re.compile) python method to control the way regex is matched
with the response, for example I('re.I').
vars:
- name: ansible_terminal_stdout_re
terminal_stderr_re:
type: list
elements: dict
description:
- This option provides the regex pattern and optional flags to match the error
string from the received response chunk. This option accepts C(pattern) and
C(flags) keys. The value of C(pattern) is a python regex pattern to match the
response and the value of C(flags) is the value accepted by I(flags) argument
of I(re.compile) python method to control the way regex is matched with the
response, for example I('re.I').
vars:
- name: ansible_terminal_stderr_re
terminal_initial_prompt:
type: list
description:
- A single regex pattern or a sequence of patterns to evaluate the expected prompt
at the time of initial login to the remote host.
vars:
- name: ansible_terminal_initial_prompt
terminal_initial_answer:
type: list
description:
- The answer to reply with if the C(terminal_initial_prompt) is matched. The value
can be a single answer or a list of answers for multiple terminal_initial_prompt.
In case the login menu has multiple prompts the sequence of the prompt and expected
answer should be in the same order and the value of I(terminal_prompt_checkall)
should be set to I(True) if all the values in C(terminal_initial_prompt) are
expected to be matched and set to I(False) if any one login prompt is to be
matched.
vars:
- name: ansible_terminal_initial_answer
terminal_initial_prompt_checkall:
type: boolean
description:
- By default the value is set to I(False), and when any one of the prompts mentioned
in the C(terminal_initial_prompt) option is matched it won't check for other prompts.
When set to I(True) it will check for all the prompts mentioned in the C(terminal_initial_prompt)
option in the given order, and all the prompts should be received from the remote
host, otherwise it will result in a timeout.
default: false
vars:
- name: ansible_terminal_initial_prompt_checkall
terminal_inital_prompt_newline:
type: boolean
description:
- This boolean flag, when set to I(True), will send a newline in the response
if any of the values in I(terminal_initial_prompt) is matched.
default: true
vars:
- name: ansible_terminal_initial_prompt_newline
network_cli_retries:
description:
- Number of attempts to connect to the remote host. The delay time between the retries
increases after every attempt by a power of 2 in seconds until either the maximum
attempts are exhausted or any of the C(persistent_command_timeout) or C(persistent_connect_timeout)
timers are triggered.
default: 3
type: integer
env:
- name: ANSIBLE_NETWORK_CLI_RETRIES
ini:
- section: persistent_connection
key: network_cli_retries
vars:
- name: ansible_network_cli_retries
"""
from functools import wraps
import getpass
import json
import logging
import re
import os
import signal
import socket
import time
import traceback
from io import BytesIO
from ansible.errors import AnsibleConnectionFailure
from ansible.module_utils.six import PY3
from ansible.module_utils.six.moves import cPickle
from ansible_collections.ansible.netcommon.plugins.module_utils.network.common.utils import (
to_list,
)
from ansible.module_utils._text import to_bytes, to_text
from ansible.playbook.play_context import PlayContext
from ansible.plugins.connection import NetworkConnectionBase
from ansible.plugins.loader import (
cliconf_loader,
terminal_loader,
connection_loader,
)
def ensure_connect(func):
@wraps(func)
def wrapped(self, *args, **kwargs):
if not self._connected:
self._connect()
self.update_cli_prompt_context()
return func(self, *args, **kwargs)
return wrapped
class AnsibleCmdRespRecv(Exception):
pass
class Connection(NetworkConnectionBase):
""" CLI (shell) SSH connections on Paramiko """
transport = "ansible.netcommon.network_cli"
has_pipelining = True
def __init__(self, play_context, new_stdin, *args, **kwargs):
super(Connection, self).__init__(
play_context, new_stdin, *args, **kwargs
)
self._ssh_shell = None
self._matched_prompt = None
self._matched_cmd_prompt = None
self._matched_pattern = None
self._last_response = None
self._history = list()
self._command_response = None
self._last_recv_window = None
self._terminal = None
self.cliconf = None
self._paramiko_conn = None
# Managing prompt context
self._check_prompt = False
self._task_uuid = to_text(kwargs.get("task_uuid", ""))
if self._play_context.verbosity > 3:
logging.getLogger("paramiko").setLevel(logging.DEBUG)
if self._network_os:
self._terminal = terminal_loader.get(self._network_os, self)
if not self._terminal:
raise AnsibleConnectionFailure(
"network os %s is not supported" % self._network_os
)
self.cliconf = cliconf_loader.get(self._network_os, self)
if self.cliconf:
self._sub_plugin = {
"type": "cliconf",
"name": self.cliconf._load_name,
"obj": self.cliconf,
}
self.queue_message(
"vvvv",
"loaded cliconf plugin %s from path %s for network_os %s"
% (
self.cliconf._load_name,
self.cliconf._original_path,
self._network_os,
),
)
else:
self.queue_message(
"vvvv",
"unable to load cliconf for network_os %s"
% self._network_os,
)
else:
raise AnsibleConnectionFailure(
"Unable to automatically determine host network os. Please "
"manually configure ansible_network_os value for this host"
)
self.queue_message("log", "network_os is set to %s" % self._network_os)
@property
def paramiko_conn(self):
if self._paramiko_conn is None:
self._paramiko_conn = connection_loader.get(
"paramiko", self._play_context, "/dev/null"
)
self._paramiko_conn.set_options(
direct={
"look_for_keys": not bool(
self._play_context.password
and not self._play_context.private_key_file
)
}
)
return self._paramiko_conn
def _get_log_channel(self):
name = "p=%s u=%s | " % (os.getpid(), getpass.getuser())
name += "paramiko [%s]" % self._play_context.remote_addr
return name
@ensure_connect
def get_prompt(self):
"""Returns the current prompt from the device"""
return self._matched_prompt
def exec_command(self, cmd, in_data=None, sudoable=True):
# this try..except block is just to handle the transition to supporting
# network_cli as a toplevel connection. Once connection=local is gone,
# this block can be removed as well and all calls passed directly to
# the local connection
if self._ssh_shell:
try:
cmd = json.loads(to_text(cmd, errors="surrogate_or_strict"))
kwargs = {
"command": to_bytes(
cmd["command"], errors="surrogate_or_strict"
)
}
for key in (
"prompt",
"answer",
"sendonly",
"newline",
"prompt_retry_check",
):
if cmd.get(key) is True or cmd.get(key) is False:
kwargs[key] = cmd[key]
elif cmd.get(key) is not None:
kwargs[key] = to_bytes(
cmd[key], errors="surrogate_or_strict"
)
return self.send(**kwargs)
except ValueError:
cmd = to_bytes(cmd, errors="surrogate_or_strict")
return self.send(command=cmd)
else:
return super(Connection, self).exec_command(cmd, in_data, sudoable)
def update_play_context(self, pc_data):
"""Updates the play context information for the connection"""
pc_data = to_bytes(pc_data)
if PY3:
pc_data = cPickle.loads(pc_data, encoding="bytes")
else:
pc_data = cPickle.loads(pc_data)
play_context = PlayContext()
play_context.deserialize(pc_data)
self.queue_message("vvvv", "updating play_context for connection")
if self._play_context.become ^ play_context.become:
if play_context.become is True:
auth_pass = play_context.become_pass
self._terminal.on_become(passwd=auth_pass)
self.queue_message("vvvv", "authorizing connection")
else:
self._terminal.on_unbecome()
self.queue_message("vvvv", "deauthorizing connection")
self._play_context = play_context
if hasattr(self, "reset_history"):
self.reset_history()
if hasattr(self, "disable_response_logging"):
self.disable_response_logging()
def set_check_prompt(self, task_uuid):
self._check_prompt = task_uuid
def update_cli_prompt_context(self):
# set cli prompt context at the start of new task run only
if self._check_prompt and self._task_uuid != self._check_prompt:
self._task_uuid, self._check_prompt = self._check_prompt, False
self.set_cli_prompt_context()
def _connect(self):
"""
Connects to the remote device and starts the terminal
"""
if not self.connected:
self.paramiko_conn._set_log_channel(self._get_log_channel())
self.paramiko_conn.force_persistence = self.force_persistence
command_timeout = self.get_option("persistent_command_timeout")
max_pause = min(
[
self.get_option("persistent_connect_timeout"),
command_timeout,
]
)
retries = self.get_option("network_cli_retries")
total_pause = 0
for attempt in range(retries + 1):
try:
ssh = self.paramiko_conn._connect()
break
except Exception as e:
pause = 2 ** (attempt + 1)
if attempt == retries or total_pause >= max_pause:
raise AnsibleConnectionFailure(
to_text(e, errors="surrogate_or_strict")
)
else:
msg = (
u"network_cli_retry: attempt: %d, caught exception(%s), "
u"pausing for %d seconds"
% (
attempt + 1,
to_text(e, errors="surrogate_or_strict"),
pause,
)
)
self.queue_message("vv", msg)
time.sleep(pause)
total_pause += pause
continue
self.queue_message("vvvv", "ssh connection done, setting terminal")
self._connected = True
self._ssh_shell = ssh.ssh.invoke_shell()
self._ssh_shell.settimeout(command_timeout)
self.queue_message(
"vvvv",
"loaded terminal plugin for network_os %s" % self._network_os,
)
terminal_initial_prompt = (
self.get_option("terminal_initial_prompt")
or self._terminal.terminal_initial_prompt
)
terminal_initial_answer = (
self.get_option("terminal_initial_answer")
or self._terminal.terminal_initial_answer
)
newline = (
self.get_option("terminal_inital_prompt_newline")
or self._terminal.terminal_inital_prompt_newline
)
check_all = (
self.get_option("terminal_initial_prompt_checkall") or False
)
self.receive(
prompts=terminal_initial_prompt,
answer=terminal_initial_answer,
newline=newline,
check_all=check_all,
)
if self._play_context.become:
self.queue_message("vvvv", "firing event: on_become")
auth_pass = self._play_context.become_pass
self._terminal.on_become(passwd=auth_pass)
self.queue_message("vvvv", "firing event: on_open_shell()")
self._terminal.on_open_shell()
self.queue_message(
"vvvv", "ssh connection has completed successfully"
)
return self
def close(self):
"""
Close the active connection to the device
"""
# only close the connection if it's connected.
if self._connected:
self.queue_message("debug", "closing ssh connection to device")
if self._ssh_shell:
self.queue_message("debug", "firing event: on_close_shell()")
self._terminal.on_close_shell()
self._ssh_shell.close()
self._ssh_shell = None
self.queue_message("debug", "cli session is now closed")
self.paramiko_conn.close()
self._paramiko_conn = None
self.queue_message(
"debug", "ssh connection has been closed successfully"
)
super(Connection, self).close()
def receive(
self,
command=None,
prompts=None,
answer=None,
newline=True,
prompt_retry_check=False,
check_all=False,
):
"""
Handles receiving of output from command
"""
self._matched_prompt = None
self._matched_cmd_prompt = None
recv = BytesIO()
handled = False
command_prompt_matched = False
matched_prompt_window = window_count = 0
# set terminal regex values for command prompt and errors in response
self._terminal_stderr_re = self._get_terminal_std_re(
"terminal_stderr_re"
)
self._terminal_stdout_re = self._get_terminal_std_re(
"terminal_stdout_re"
)
cache_socket_timeout = self._ssh_shell.gettimeout()
command_timeout = self.get_option("persistent_command_timeout")
self._validate_timeout_value(
command_timeout, "persistent_command_timeout"
)
if cache_socket_timeout != command_timeout:
self._ssh_shell.settimeout(command_timeout)
buffer_read_timeout = self.get_option("persistent_buffer_read_timeout")
self._validate_timeout_value(
buffer_read_timeout, "persistent_buffer_read_timeout"
)
self._log_messages("command: %s" % command)
while True:
if command_prompt_matched:
try:
signal.signal(
signal.SIGALRM, self._handle_buffer_read_timeout
)
signal.setitimer(signal.ITIMER_REAL, buffer_read_timeout)
data = self._ssh_shell.recv(256)
signal.alarm(0)
self._log_messages(
"response-%s: %s" % (window_count + 1, data)
)
# if data is still received on channel it indicates the prompt string
# is wrongly matched in between response chunks, continue to read
# remaining response.
command_prompt_matched = False
# restart command_timeout timer
signal.signal(signal.SIGALRM, self._handle_command_timeout)
signal.alarm(command_timeout)
except AnsibleCmdRespRecv:
# reset socket timeout to global timeout
self._ssh_shell.settimeout(cache_socket_timeout)
return self._command_response
else:
data = self._ssh_shell.recv(256)
self._log_messages(
"response-%s: %s" % (window_count + 1, data)
)
# when a channel stream is closed, received data will be empty
if not data:
break
recv.write(data)
offset = recv.tell() - 256 if recv.tell() > 256 else 0
recv.seek(offset)
window = self._strip(recv.read())
self._last_recv_window = window
window_count += 1
if prompts and not handled:
handled = self._handle_prompt(
window, prompts, answer, newline, False, check_all
)
matched_prompt_window = window_count
elif (
prompts
and handled
and prompt_retry_check
and matched_prompt_window + 1 == window_count
):
# check again even when handled; if the same prompt repeats in the next window
# (like in the case of a wrong enable password, etc) it indicates the
# value of answer is wrong, so report this as an error.
if self._handle_prompt(
window,
prompts,
answer,
newline,
prompt_retry_check,
check_all,
):
raise AnsibleConnectionFailure(
"For matched prompt '%s', answer is not valid"
% self._matched_cmd_prompt
)
if self._find_prompt(window):
self._last_response = recv.getvalue()
resp = self._strip(self._last_response)
self._command_response = self._sanitize(resp, command)
if buffer_read_timeout == 0.0:
# reset socket timeout to global timeout
self._ssh_shell.settimeout(cache_socket_timeout)
return self._command_response
else:
command_prompt_matched = True
@ensure_connect
def send(
self,
command,
prompt=None,
answer=None,
newline=True,
sendonly=False,
prompt_retry_check=False,
check_all=False,
):
"""
Sends the command to the device in the opened shell
"""
if check_all:
prompt_len = len(to_list(prompt))
answer_len = len(to_list(answer))
if prompt_len != answer_len:
raise AnsibleConnectionFailure(
"Number of prompts (%s) is not same as that of answers (%s)"
% (prompt_len, answer_len)
)
try:
cmd = b"%s\r" % command
self._history.append(cmd)
self._ssh_shell.sendall(cmd)
self._log_messages("send command: %s" % cmd)
if sendonly:
return
response = self.receive(
command, prompt, answer, newline, prompt_retry_check, check_all
)
return to_text(response, errors="surrogate_then_replace")
except (socket.timeout, AttributeError):
self.queue_message("error", traceback.format_exc())
raise AnsibleConnectionFailure(
"timeout value %s seconds reached while trying to send command: %s"
% (self._ssh_shell.gettimeout(), command.strip())
)
def _handle_buffer_read_timeout(self, signum, frame):
self.queue_message(
"vvvv",
"Response received, triggered 'persistent_buffer_read_timeout' timer of %s seconds"
% self.get_option("persistent_buffer_read_timeout"),
)
raise AnsibleCmdRespRecv()
def _handle_command_timeout(self, signum, frame):
msg = (
"command timeout triggered, timeout value is %s secs.\nSee the timeout setting options in the Network Debug and Troubleshooting Guide."
% self.get_option("persistent_command_timeout")
)
self.queue_message("log", msg)
raise AnsibleConnectionFailure(msg)
def _strip(self, data):
"""
Removes ANSI codes from device response
"""
for regex in self._terminal.ansi_re:
data = regex.sub(b"", data)
return data
def _handle_prompt(
self,
resp,
prompts,
answer,
newline,
prompt_retry_check=False,
check_all=False,
):
"""
Matches the command prompt and responds
:arg resp: Byte string containing the raw response from the remote
:arg prompts: Sequence of byte strings that we consider prompts for input
:arg answer: Sequence of Byte string to send back to the remote if we find a prompt.
A carriage return is automatically appended to this string.
:param prompt_retry_check: Bool value for trying to detect more prompts
:param check_all: Bool value to indicate if all the values in prompt sequence should be matched or any one of
given prompt.
:returns: True if a prompt was found in ``resp``. If check_all is True,
returns True only after all the prompts in the prompts list are matched. False otherwise.
"""
single_prompt = False
if not isinstance(prompts, list):
prompts = [prompts]
single_prompt = True
if not isinstance(answer, list):
answer = [answer]
prompts_regex = [re.compile(to_bytes(r), re.I) for r in prompts]
for index, regex in enumerate(prompts_regex):
match = regex.search(resp)
if match:
self._matched_cmd_prompt = match.group()
self._log_messages(
"matched command prompt: %s" % self._matched_cmd_prompt
)
# if prompt_retry_check is enabled to check whether the same prompt is
# repeated, don't send the answer again.
if not prompt_retry_check:
prompt_answer = (
answer[index] if len(answer) > index else answer[0]
)
self._ssh_shell.sendall(b"%s" % prompt_answer)
if newline:
self._ssh_shell.sendall(b"\r")
prompt_answer += b"\r"
self._log_messages(
"matched command prompt answer: %s" % prompt_answer
)
if check_all and prompts and not single_prompt:
prompts.pop(0)
answer.pop(0)
return False
return True
return False
def _sanitize(self, resp, command=None):
"""
Removes elements from the response before returning to the caller
"""
cleaned = []
for line in resp.splitlines():
if command and line.strip() == command.strip():
continue
for prompt in self._matched_prompt.strip().splitlines():
if prompt.strip() in line:
break
else:
cleaned.append(line)
return b"\n".join(cleaned).strip()
def _find_prompt(self, response):
"""Searches the buffered response for a matching command prompt
"""
errored_response = None
is_error_message = False
for regex in self._terminal_stderr_re:
if regex.search(response):
is_error_message = True
# Check if the error response also ends with a command prompt; if it
# does not, keep receiving into the buffer until the prompt arrives.
for regex in self._terminal_stdout_re:
match = regex.search(response)
if match:
errored_response = response
self._matched_pattern = regex.pattern
self._matched_prompt = match.group()
self._log_messages(
"matched error regex '%s' from response '%s'"
% (self._matched_pattern, errored_response)
)
break
if not is_error_message:
for regex in self._terminal_stdout_re:
match = regex.search(response)
if match:
self._matched_pattern = regex.pattern
self._matched_prompt = match.group()
self._log_messages(
"matched cli prompt '%s' with regex '%s' from response '%s'"
% (
self._matched_prompt,
self._matched_pattern,
response,
)
)
if not errored_response:
return True
if errored_response:
raise AnsibleConnectionFailure(errored_response)
return False
def _validate_timeout_value(self, timeout, timer_name):
if timeout < 0:
raise AnsibleConnectionFailure(
"'%s' timer value '%s' is invalid, value should be greater than or equal to zero."
% (timer_name, timeout)
)
def transport_test(self, connect_timeout):
"""This method enables wait_for_connection to work.
As it is used by wait_for_connection, it is called by that module's action plugin,
which is on the controller process, which means that nothing done on this instance
should impact the actual persistent connection... this check is for informational
purposes only and should be properly cleaned up.
"""
# Force a fresh connect if for some reason we have connected before.
self.close()
self._connect()
self.close()
def _get_terminal_std_re(self, option):
terminal_std_option = self.get_option(option)
terminal_std_re = []
if terminal_std_option:
for item in terminal_std_option:
if "pattern" not in item:
raise AnsibleConnectionFailure(
"'pattern' is a required key for option '%s',"
" received option value is %s" % (option, item)
)
pattern = br"%s" % to_bytes(item["pattern"])
flag = item.get("flags", 0)
if flag:
flag = getattr(re, flag.split(".")[1])
terminal_std_re.append(re.compile(pattern, flag))
else:
# To maintain backward compatibility
terminal_std_re = getattr(self._terminal, option)
return terminal_std_re
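As a rough illustration of the send() signature defined earlier in this plugin, the following sketch assumes `conn` is an already-established network_cli connection instance reached through Ansible's persistent connection machinery; the command, prompt, and answer values are made up.

# Hypothetical, for illustration only: `conn` is assumed to be a live
# network_cli connection instance.
output = conn.send(
    command=b"copy running-config startup-config",
    prompt=[b"Destination filename"],  # prompt regex the device may present
    answer=[b"startup-config"],        # reply sent back; a newline is appended
)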

@ -0,0 +1,97 @@
# 2017 Red Hat Inc.
# (c) 2017 Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
DOCUMENTATION = """author: Ansible Core Team
connection: persistent
short_description: Use a persistent unix socket for connection
description:
- This is a helper plugin to allow making other connections persistent.
options:
persistent_command_timeout:
type: int
description:
- Configures, in seconds, the amount of time to wait for a command to return from
the remote device. If this timer is exceeded before the command returns, the
connection plugin will raise an exception and close
default: 10
ini:
- section: persistent_connection
key: command_timeout
env:
- name: ANSIBLE_PERSISTENT_COMMAND_TIMEOUT
vars:
- name: ansible_command_timeout
"""
from ansible.executor.task_executor import start_connection
from ansible.plugins.connection import ConnectionBase
from ansible.module_utils._text import to_text
from ansible.module_utils.connection import Connection as SocketConnection
from ansible.utils.display import Display
display = Display()
class Connection(ConnectionBase):
""" Local based connections """
transport = "ansible.netcommon.persistent"
has_pipelining = False
def __init__(self, play_context, new_stdin, *args, **kwargs):
super(Connection, self).__init__(
play_context, new_stdin, *args, **kwargs
)
self._task_uuid = to_text(kwargs.get("task_uuid", ""))
def _connect(self):
self._connected = True
return self
def exec_command(self, cmd, in_data=None, sudoable=True):
display.vvvv(
"exec_command(), socket_path=%s" % self.socket_path,
host=self._play_context.remote_addr,
)
connection = SocketConnection(self.socket_path)
out = connection.exec_command(cmd, in_data=in_data, sudoable=sudoable)
return 0, out, ""
def put_file(self, in_path, out_path):
pass
def fetch_file(self, in_path, out_path):
pass
def close(self):
self._connected = False
def run(self):
"""Returns the path of the persistent connection socket.
Attempts to ensure (within playcontext.timeout seconds) that the
socket path exists. If the path exists (or the timeout has expired),
returns the socket path.
"""
display.vvvv(
"starting connection from persistent connection plugin",
host=self._play_context.remote_addr,
)
variables = {
"ansible_command_timeout": self.get_option(
"persistent_command_timeout"
)
}
socket_path = start_connection(
self._play_context, variables, self._task_uuid
)
display.vvvv(
"local domain socket path is %s" % socket_path,
host=self._play_context.remote_addr,
)
setattr(self, "_socket_path", socket_path)
return socket_path
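For context, the module side talks to this socket through ansible.module_utils.connection.Connection, mirroring what exec_command() above does; a minimal sketch with an assumed socket path:

from ansible.module_utils.connection import Connection as SocketConnection

# Hypothetical socket path created by a previous persistent connection.
socket_path = "/home/user/.ansible/pc/0123456789"
connection = SocketConnection(socket_path)
stdout = connection.exec_command("show version")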

@ -0,0 +1,66 @@
# -*- coding: utf-8 -*-
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
class ModuleDocFragment(object):
# Standard files documentation fragment
DOCUMENTATION = r"""options:
host:
description:
- Specifies the DNS host name or address for connecting to the remote device over
the specified transport. The value of host is used as the destination address
for the transport.
type: str
required: true
port:
description:
- Specifies the port to use when building the connection to the remote device. The
port value will default to port 830.
type: int
default: 830
username:
description:
- Configures the username to use to authenticate the connection to the remote
device. This value is used to authenticate the SSH session. If the value is
not specified in the task, the value of environment variable C(ANSIBLE_NET_USERNAME)
will be used instead.
type: str
password:
description:
- Specifies the password to use to authenticate the connection to the remote device. This
value is used to authenticate the SSH session. If the value is not specified
in the task, the value of environment variable C(ANSIBLE_NET_PASSWORD) will
be used instead.
type: str
timeout:
description:
- Specifies the timeout in seconds for communicating with the network device for
either connecting or sending commands. If the timeout is exceeded before the
operation is completed, the module will error.
type: int
default: 10
ssh_keyfile:
description:
- Specifies the SSH key to use to authenticate the connection to the remote device. This
value is the path to the key used to authenticate the SSH session. If the value
is not specified in the task, the value of environment variable C(ANSIBLE_NET_SSH_KEYFILE)
will be used instead.
type: path
hostkey_verify:
description:
- If set to C(yes), the ssh host key of the device must match an ssh key present
on the host; if set to C(no), the ssh host key of the device is not checked.
type: bool
default: true
look_for_keys:
description:
- Enables looking in the usual locations for the ssh keys (e.g. :file:`~/.ssh/id_*`)
type: bool
default: true
notes:
- For information on using netconf see the :ref:`Platform Options guide using Netconf<netconf_enabled_platform_options>`
- For more information on using Ansible to manage network devices see the :ref:`Ansible
Network Guide <network_guide>`
"""

@ -0,0 +1,14 @@
# -*- coding: utf-8 -*-
# Copyright: (c) 2019 Ansible, Inc
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
class ModuleDocFragment(object):
# Standard files documentation fragment
DOCUMENTATION = r"""options: {}
notes:
- This module is supported on C(ansible_network_os) network platforms. See the :ref:`Network
Platform Options <platform_options>` for details.
"""

@ -0,0 +1,531 @@
#
# (c) 2017 Red Hat, Inc.
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# Make coding more python3-ish
from __future__ import absolute_import, division, print_function
__metaclass__ = type
import re
import os
import traceback
import string
from xml.etree.ElementTree import fromstring
from ansible.module_utils._text import to_native, to_text
from ansible_collections.ansible.netcommon.plugins.module_utils.network.common.utils import (
Template,
)
from ansible.module_utils.six import iteritems, string_types
from ansible.module_utils.common._collections_compat import Mapping
from ansible.errors import AnsibleError, AnsibleFilterError
from ansible.utils.display import Display
from ansible.utils.encrypt import passlib_or_crypt, random_password
try:
import yaml
HAS_YAML = True
except ImportError:
HAS_YAML = False
try:
import textfsm
HAS_TEXTFSM = True
except ImportError:
HAS_TEXTFSM = False
display = Display()
def re_matchall(regex, value):
objects = list()
for match in re.findall(regex.pattern, value, re.M):
obj = {}
if regex.groupindex:
for name, index in iteritems(regex.groupindex):
if len(regex.groupindex) == 1:
obj[name] = match
else:
obj[name] = match[index - 1]
objects.append(obj)
return objects
def re_search(regex, value):
obj = {}
match = regex.search(value, re.M)
if match:
items = list(match.groups())
if regex.groupindex:
for name, index in iteritems(regex.groupindex):
obj[name] = items[index - 1]
return obj
def parse_cli(output, tmpl):
if not isinstance(output, string_types):
raise AnsibleError(
"parse_cli input should be a string, but was given a input of %s"
% (type(output))
)
if not os.path.exists(tmpl):
raise AnsibleError("unable to locate parse_cli template: %s" % tmpl)
try:
template = Template()
except ImportError as exc:
raise AnsibleError(to_native(exc))
with open(tmpl) as tmpl_fh:
tmpl_content = tmpl_fh.read()
spec = yaml.safe_load(tmpl_content)
obj = {}
for name, attrs in iteritems(spec["keys"]):
value = attrs["value"]
try:
variables = spec.get("vars", {})
value = template(value, variables)
except Exception:
pass
if "start_block" in attrs and "end_block" in attrs:
start_block = re.compile(attrs["start_block"])
end_block = re.compile(attrs["end_block"])
blocks = list()
lines = None
block_started = False
for line in output.split("\n"):
match_start = start_block.match(line)
match_end = end_block.match(line)
if match_start:
lines = list()
lines.append(line)
block_started = True
elif match_end:
if lines:
lines.append(line)
blocks.append("\n".join(lines))
block_started = False
elif block_started:
if lines:
lines.append(line)
regex_items = [re.compile(r) for r in attrs["items"]]
objects = list()
for block in blocks:
if isinstance(value, Mapping) and "key" not in value:
items = list()
for regex in regex_items:
match = regex.search(block)
if match:
item_values = match.groupdict()
item_values["match"] = list(match.groups())
items.append(item_values)
else:
items.append(None)
obj = {}
for k, v in iteritems(value):
try:
obj[k] = template(
v, {"item": items}, fail_on_undefined=False
)
except Exception:
obj[k] = None
objects.append(obj)
elif isinstance(value, Mapping):
items = list()
for regex in regex_items:
match = regex.search(block)
if match:
item_values = match.groupdict()
item_values["match"] = list(match.groups())
items.append(item_values)
else:
items.append(None)
key = template(value["key"], {"item": items})
values = dict(
[
(k, template(v, {"item": items}))
for k, v in iteritems(value["values"])
]
)
objects.append({key: values})
return objects
elif "items" in attrs:
regexp = re.compile(attrs["items"])
when = attrs.get("when")
conditional = (
"{%% if %s %%}True{%% else %%}False{%% endif %%}" % when
)
if isinstance(value, Mapping) and "key" not in value:
values = list()
for item in re_matchall(regexp, output):
entry = {}
for item_key, item_value in iteritems(value):
entry[item_key] = template(item_value, {"item": item})
if when:
if template(conditional, {"item": entry}):
values.append(entry)
else:
values.append(entry)
obj[name] = values
elif isinstance(value, Mapping):
values = dict()
for item in re_matchall(regexp, output):
entry = {}
for item_key, item_value in iteritems(value["values"]):
entry[item_key] = template(item_value, {"item": item})
key = template(value["key"], {"item": item})
if when:
if template(
conditional, {"item": {"key": key, "value": entry}}
):
values[key] = entry
else:
values[key] = entry
obj[name] = values
else:
item = re_search(regexp, output)
obj[name] = template(value, {"item": item})
else:
obj[name] = value
return obj
def parse_cli_textfsm(value, template):
if not HAS_TEXTFSM:
raise AnsibleError(
"parse_cli_textfsm filter requires TextFSM library to be installed"
)
if not isinstance(value, string_types):
raise AnsibleError(
"parse_cli_textfsm input should be a string, but was given a input of %s"
% (type(value))
)
if not os.path.exists(template):
raise AnsibleError(
"unable to locate parse_cli_textfsm template: %s" % template
)
try:
template = open(template)
except IOError as exc:
raise AnsibleError(to_native(exc))
re_table = textfsm.TextFSM(template)
fsm_results = re_table.ParseText(value)
results = list()
for item in fsm_results:
results.append(dict(zip(re_table.header, item)))
return results
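A minimal sketch of calling parse_cli_textfsm directly, assuming textfsm is installed; both the template path and the command output below are placeholders.

device_output = "Cisco IOS XE Software, Version 16.9.3"  # placeholder output
parsed = parse_cli_textfsm(device_output, "templates/show_version.textfsm")
# parsed is a list of dicts keyed by the template's Value names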
def _extract_param(template, root, attrs, value):
key = None
when = attrs.get("when")
conditional = "{%% if %s %%}True{%% else %%}False{%% endif %%}" % when
param_to_xpath_map = attrs["items"]
if isinstance(value, Mapping):
key = value.get("key", None)
if key:
value = value["values"]
entries = dict() if key else list()
for element in root.findall(attrs["top"]):
entry = dict()
item_dict = dict()
for param, param_xpath in iteritems(param_to_xpath_map):
fields = None
try:
fields = element.findall(param_xpath)
except Exception:
display.warning(
"Failed to evaluate value of '%s' with XPath '%s'.\nUnexpected error: %s."
% (param, param_xpath, traceback.format_exc())
)
tags = param_xpath.split("/")
# check if the xpath ends with an attribute.
# If yes, set the attribute key/value dict as the param value when the attribute matches;
# otherwise, for a normal xpath, assign the matched element's text value.
if len(tags) and tags[-1].endswith("]"):
if fields:
if len(fields) > 1:
item_dict[param] = [field.attrib for field in fields]
else:
item_dict[param] = fields[0].attrib
else:
item_dict[param] = {}
else:
if fields:
if len(fields) > 1:
item_dict[param] = [field.text for field in fields]
else:
item_dict[param] = fields[0].text
else:
item_dict[param] = None
if isinstance(value, Mapping):
for item_key, item_value in iteritems(value):
entry[item_key] = template(item_value, {"item": item_dict})
else:
entry = template(value, {"item": item_dict})
if key:
expanded_key = template(key, {"item": item_dict})
if when:
if template(
conditional,
{"item": {"key": expanded_key, "value": entry}},
):
entries[expanded_key] = entry
else:
entries[expanded_key] = entry
else:
if when:
if template(conditional, {"item": entry}):
entries.append(entry)
else:
entries.append(entry)
return entries
def parse_xml(output, tmpl):
if not os.path.exists(tmpl):
raise AnsibleError("unable to locate parse_xml template: %s" % tmpl)
if not isinstance(output, string_types):
raise AnsibleError(
"parse_xml works on string input, but given input of : %s"
% type(output)
)
root = fromstring(output)
try:
template = Template()
except ImportError as exc:
raise AnsibleError(to_native(exc))
with open(tmpl) as tmpl_fh:
tmpl_content = tmpl_fh.read()
spec = yaml.safe_load(tmpl_content)
obj = {}
for name, attrs in iteritems(spec["keys"]):
value = attrs["value"]
try:
variables = spec.get("vars", {})
value = template(value, variables)
except Exception:
pass
if "items" in attrs:
obj[name] = _extract_param(template, root, attrs, value)
else:
obj[name] = value
return obj
def type5_pw(password, salt=None):
if not isinstance(password, string_types):
raise AnsibleFilterError(
"type5_pw password input should be a string, but was given a input of %s"
% (type(password).__name__)
)
salt_chars = u"".join(
(to_text(string.ascii_letters), to_text(string.digits), u"./")
)
if salt is not None and not isinstance(salt, string_types):
raise AnsibleFilterError(
"type5_pw salt input should be a string, but was given a input of %s"
% (type(salt).__name__)
)
elif not salt:
salt = random_password(length=4, chars=salt_chars)
elif not set(salt) <= set(salt_chars):
raise AnsibleFilterError(
"type5_pw salt used inproper characters, must be one of %s"
% (salt_chars)
)
encrypted_password = passlib_or_crypt(password, "md5_crypt", salt=salt)
return encrypted_password
def hash_salt(password):
split_password = password.split("$")
if len(split_password) != 4:
raise AnsibleFilterError(
"Could not parse salt out password correctly from {0}".format(
password
)
)
else:
return split_password[2]
def comp_type5(
unencrypted_password, encrypted_password, return_original=False
):
salt = hash_salt(encrypted_password)
if type5_pw(unencrypted_password, salt) == encrypted_password:
if return_original is True:
return encrypted_password
else:
return True
return False
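Taken together, type5_pw, hash_salt, and comp_type5 can round-trip an IOS type 5 secret; a small sketch, assuming passlib or crypt is available (the password and salt values are illustrative):

hashed = type5_pw("ansible", salt="nTc1")  # -> "$1$nTc1$..." style hash
print(hash_salt(hashed))                   # -> "nTc1"
print(comp_type5("ansible", hashed))       # -> True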
def vlan_parser(vlan_list, first_line_len=48, other_line_len=44):
"""
Input: Unsorted list of vlan integers
Output: Sorted string list of integers according to IOS-like vlan list rules
1. Vlans are listed in ascending order
2. Runs of 3 or more consecutive vlans are listed with a dash
3. The first line of the list can be first_line_len characters long
4. Subsequent list lines can be other_line_len characters
"""
# Sort and remove duplicates
sorted_list = sorted(set(vlan_list))
if sorted_list[0] < 1 or sorted_list[-1] > 4094:
raise AnsibleFilterError("Valid VLAN range is 1-4094")
parse_list = []
idx = 0
while idx < len(sorted_list):
start = idx
end = start
while end < len(sorted_list) - 1:
if sorted_list[end + 1] - sorted_list[end] == 1:
end += 1
else:
break
if start == end:
# Single VLAN
parse_list.append(str(sorted_list[idx]))
elif start + 1 == end:
# Run of 2 VLANs
parse_list.append(str(sorted_list[start]))
parse_list.append(str(sorted_list[end]))
else:
# Run of 3 or more VLANs
parse_list.append(
str(sorted_list[start]) + "-" + str(sorted_list[end])
)
idx = end + 1
line_count = 0
result = [""]
for vlans in parse_list:
# First line (" switchport trunk allowed vlan ")
if line_count == 0:
if len(result[line_count] + vlans) > first_line_len:
result.append("")
line_count += 1
result[line_count] += vlans + ","
else:
result[line_count] += vlans + ","
# Subsequent lines (" switchport trunk allowed vlan add ")
else:
if len(result[line_count] + vlans) > other_line_len:
result.append("")
line_count += 1
result[line_count] += vlans + ","
else:
result[line_count] += vlans + ","
# Remove trailing orphan commas
for idx in range(0, len(result)):
result[idx] = result[idx].rstrip(",")
# Sometimes text wraps to next line, but there are no remaining VLANs
if "" in result:
result.remove("")
return result
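For example, the filter collapses runs of three or more consecutive VLANs into ranges while leaving shorter runs expanded; a quick sketch with made-up VLAN IDs:

print(vlan_parser([100, 1, 2, 3, 4, 105]))
# -> ['1-4,100,105']  (fits within the default 48-character first line)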
class FilterModule(object):
"""Filters for working with output from network devices"""
filter_map = {
"parse_cli": parse_cli,
"parse_cli_textfsm": parse_cli_textfsm,
"parse_xml": parse_xml,
"type5_pw": type5_pw,
"hash_salt": hash_salt,
"comp_type5": comp_type5,
"vlan_parser": vlan_parser,
}
def filters(self):
return self.filter_map

@ -0,0 +1,91 @@
# Copyright (c) 2018 Cisco and/or its affiliates.
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
from __future__ import absolute_import, division, print_function
__metaclass__ = type
DOCUMENTATION = """author: Ansible Networking Team
httpapi: restconf
short_description: HttpApi Plugin for devices supporting Restconf API
description:
- This HttpApi plugin provides methods to connect to Restconf API endpoints.
options:
root_path:
type: str
description:
- Specifies the location of the Restconf root.
default: /restconf
vars:
- name: ansible_httpapi_restconf_root
"""
import json
from ansible.module_utils._text import to_text
from ansible.module_utils.connection import ConnectionError
from ansible.module_utils.six.moves.urllib.error import HTTPError
from ansible.plugins.httpapi import HttpApiBase
CONTENT_TYPE = "application/yang-data+json"
class HttpApi(HttpApiBase):
def send_request(self, data, **message_kwargs):
if data:
data = json.dumps(data)
path = "/".join(
[
self.get_option("root_path").rstrip("/"),
message_kwargs.get("path", "").lstrip("/"),
]
)
headers = {
"Content-Type": message_kwargs.get("content_type") or CONTENT_TYPE,
"Accept": message_kwargs.get("accept") or CONTENT_TYPE,
}
response, response_data = self.connection.send(
path, data, headers=headers, method=message_kwargs.get("method")
)
return handle_response(response, response_data)
def handle_response(response, response_data):
try:
response_data = json.loads(response_data.read())
except ValueError:
response_data = response_data.read()
if isinstance(response, HTTPError):
if response_data:
if "errors" in response_data:
errors = response_data["errors"]["error"]
error_text = "\n".join(
(error["error-message"] for error in errors)
)
else:
error_text = response_data
raise ConnectionError(error_text, code=response.code)
raise ConnectionError(to_text(response), code=response.code)
return response_data
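From the module side, this plugin is normally reached through the persistent connection proxy, so a call such as the following (the socket path and RESTCONF data path are placeholders) ends up in send_request() above:

from ansible.module_utils.connection import Connection

conn = Connection("/tmp/example-httpapi.sock")  # placeholder socket path
reply = conn.send_request(
    None, path="/data/ietf-interfaces:interfaces", method="GET"
)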

@ -0,0 +1,27 @@
#
# -*- coding: utf-8 -*-
# Copyright 2019 Red Hat
# GNU General Public License v3.0+
# (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
"""
The base class for all resource modules
"""
from ansible_collections.ansible.netcommon.plugins.module_utils.network.common.network import (
get_resource_connection,
)
class ConfigBase(object):
""" The base class for all resource modules
"""
ACTION_STATES = ["merged", "replaced", "overridden", "deleted"]
def __init__(self, module):
self._module = module
self.state = module.params["state"]
self._connection = None
if self.state not in ["rendered", "parsed"]:
self._connection = get_resource_connection(module)
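Resource modules typically subclass this base; a skeletal sketch, with an illustrative class and resource name:

class Interfaces(ConfigBase):
    """Hypothetical interfaces resource module config class."""

    gather_subset = ["!all", "!min"]
    gather_network_resources = ["interfaces"]

    def execute_module(self):
        # would compute and apply the configuration diff here
        return {"changed": False}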

@ -0,0 +1,473 @@
# This code is part of Ansible, but is an independent component.
# This particular file snippet, and this file snippet only, is BSD licensed.
# Modules you write using this snippet, which is embedded dynamically by Ansible
# still belong to the author of the module, and may assign their own license
# to the complete work.
#
# (c) 2016 Red Hat Inc.
#
# Redistribution and use in source and binary forms, with or without modification,
# are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
# IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
# USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
import re
import hashlib
from ansible.module_utils.six.moves import zip
from ansible.module_utils._text import to_bytes, to_native
DEFAULT_COMMENT_TOKENS = ["#", "!", "/*", "*/", "echo"]
DEFAULT_IGNORE_LINES_RE = set(
[
re.compile(r"Using \d+ out of \d+ bytes"),
re.compile(r"Building configuration"),
re.compile(r"Current configuration : \d+ bytes"),
]
)
try:
Pattern = re._pattern_type
except AttributeError:
Pattern = re.Pattern
class ConfigLine(object):
def __init__(self, raw):
self.text = str(raw).strip()
self.raw = raw
self._children = list()
self._parents = list()
def __str__(self):
return self.raw
def __eq__(self, other):
return self.line == other.line
def __ne__(self, other):
return not self.__eq__(other)
def __getitem__(self, key):
for item in self._children:
if item.text == key:
return item
raise KeyError(key)
@property
def line(self):
line = self.parents
line.append(self.text)
return " ".join(line)
@property
def children(self):
return _obj_to_text(self._children)
@property
def child_objs(self):
return self._children
@property
def parents(self):
return _obj_to_text(self._parents)
@property
def path(self):
config = _obj_to_raw(self._parents)
config.append(self.raw)
return "\n".join(config)
@property
def has_children(self):
return len(self._children) > 0
@property
def has_parents(self):
return len(self._parents) > 0
def add_child(self, obj):
if not isinstance(obj, ConfigLine):
raise AssertionError("child must be of type `ConfigLine`")
self._children.append(obj)
def ignore_line(text, tokens=None):
for item in tokens or DEFAULT_COMMENT_TOKENS:
if text.startswith(item):
return True
for regex in DEFAULT_IGNORE_LINES_RE:
if regex.match(text):
return True
def _obj_to_text(x):
return [o.text for o in x]
def _obj_to_raw(x):
return [o.raw for o in x]
def _obj_to_block(objects, visited=None):
items = list()
for o in objects:
if o not in items:
items.append(o)
for child in o._children:
if child not in items:
items.append(child)
return _obj_to_raw(items)
def dumps(objects, output="block", comments=False):
if output == "block":
items = _obj_to_block(objects)
elif output == "commands":
items = _obj_to_text(objects)
elif output == "raw":
items = _obj_to_raw(objects)
else:
raise TypeError("unknown value supplied for keyword output")
if output == "block":
if comments:
for index, item in enumerate(items):
nextitem = index + 1
if (
nextitem < len(items)
and not item.startswith(" ")
and items[nextitem].startswith(" ")
):
item = "!\n%s" % item
items[index] = item
items.append("!")
items.append("end")
return "\n".join(items)
class NetworkConfig(object):
def __init__(self, indent=1, contents=None, ignore_lines=None):
self._indent = indent
self._items = list()
self._config_text = None
if ignore_lines:
for item in ignore_lines:
if not isinstance(item, Pattern):
item = re.compile(item)
DEFAULT_IGNORE_LINES_RE.add(item)
if contents:
self.load(contents)
@property
def items(self):
return self._items
@property
def config_text(self):
return self._config_text
@property
def sha1(self):
sha1 = hashlib.sha1()
sha1.update(to_bytes(str(self), errors="surrogate_or_strict"))
return sha1.digest()
def __getitem__(self, key):
for line in self:
if line.text == key:
return line
raise KeyError(key)
def __iter__(self):
return iter(self._items)
def __str__(self):
return "\n".join([c.raw for c in self.items])
def __len__(self):
return len(self._items)
def load(self, s):
self._config_text = s
self._items = self.parse(s)
def loadfp(self, fp):
with open(fp) as f:
return self.load(f.read())
def parse(self, lines, comment_tokens=None):
toplevel = re.compile(r"\S")
childline = re.compile(r"^\s*(.+)$")
entry_reg = re.compile(r"([{};])")
ancestors = list()
config = list()
indents = [0]
for linenum, line in enumerate(
to_native(lines, errors="surrogate_or_strict").split("\n")
):
text = entry_reg.sub("", line).strip()
cfg = ConfigLine(line)
if not text or ignore_line(text, comment_tokens):
continue
# handle top level commands
if toplevel.match(line):
ancestors = [cfg]
indents = [0]
# handle sub level commands
else:
match = childline.match(line)
line_indent = match.start(1)
if line_indent < indents[-1]:
while indents[-1] > line_indent:
indents.pop()
if line_indent > indents[-1]:
indents.append(line_indent)
curlevel = len(indents) - 1
parent_level = curlevel - 1
cfg._parents = ancestors[:curlevel]
if curlevel > len(ancestors):
config.append(cfg)
continue
for i in range(curlevel, len(ancestors)):
ancestors.pop()
ancestors.append(cfg)
ancestors[parent_level].add_child(cfg)
config.append(cfg)
return config
def get_object(self, path):
for item in self.items:
if item.text == path[-1]:
if item.parents == path[:-1]:
return item
def get_block(self, path):
if not isinstance(path, list):
raise AssertionError("path argument must be a list object")
obj = self.get_object(path)
if not obj:
raise ValueError("path does not exist in config")
return self._expand_block(obj)
def get_block_config(self, path):
block = self.get_block(path)
return dumps(block, "block")
def _expand_block(self, configobj, S=None):
if S is None:
S = list()
S.append(configobj)
for child in configobj._children:
if child in S:
continue
self._expand_block(child, S)
return S
def _diff_line(self, other):
updates = list()
for item in self.items:
if item not in other:
updates.append(item)
return updates
def _diff_strict(self, other):
updates = list()
# the block extracted from other does not have all parents,
# only the last one. In case of multiple parents we need
# to add the additional parents.
if other and isinstance(other, list) and len(other) > 0:
start_other = other[0]
if start_other.parents:
for parent in start_other.parents:
other.insert(0, ConfigLine(parent))
for index, line in enumerate(self.items):
try:
if str(line).strip() != str(other[index]).strip():
updates.append(line)
except (AttributeError, IndexError):
updates.append(line)
return updates
def _diff_exact(self, other):
updates = list()
if len(other) != len(self.items):
updates.extend(self.items)
else:
for ours, theirs in zip(self.items, other):
if ours != theirs:
updates.extend(self.items)
break
return updates
def difference(self, other, match="line", path=None, replace=None):
"""Perform a config diff against the another network config
:param other: instance of NetworkConfig to diff against
:param match: type of diff to perform. valid values are 'line',
'strict', 'exact'
:param path: context in the network config to filter the diff
:param replace: the method used to generate the replacement lines.
valid values are 'block', 'line'
:returns: a string of lines that are different
"""
if path and match != "line":
try:
other = other.get_block(path)
except ValueError:
other = list()
else:
other = other.items
# generate a list of ConfigLines that aren't in other
meth = getattr(self, "_diff_%s" % match)
updates = meth(other)
if replace == "block":
parents = list()
for item in updates:
if not item.has_parents:
parents.append(item)
else:
for p in item._parents:
if p not in parents:
parents.append(p)
updates = list()
for item in parents:
updates.extend(self._expand_block(item))
visited = set()
expanded = list()
for item in updates:
for p in item._parents:
if p.line not in visited:
visited.add(p.line)
expanded.append(p)
expanded.append(item)
visited.add(item.line)
return expanded
def add(self, lines, parents=None):
ancestors = list()
offset = 0
obj = None
# global config command
if not parents:
for line in lines:
# handle ignore lines
if ignore_line(line):
continue
item = ConfigLine(line)
item.raw = line
if item not in self.items:
self.items.append(item)
else:
for index, p in enumerate(parents):
try:
i = index + 1
obj = self.get_block(parents[:i])[0]
ancestors.append(obj)
except ValueError:
# add parent to config
offset = index * self._indent
obj = ConfigLine(p)
obj.raw = p.rjust(len(p) + offset)
if ancestors:
obj._parents = list(ancestors)
ancestors[-1]._children.append(obj)
self.items.append(obj)
ancestors.append(obj)
# add child objects
for line in lines:
# handle ignore lines
if ignore_line(line):
continue
# check if child already exists
for child in ancestors[-1]._children:
if child.text == line:
break
else:
offset = len(parents) * self._indent
item = ConfigLine(line)
item.raw = line.rjust(len(line) + offset)
item._parents = ancestors
ancestors[-1]._children.append(item)
self.items.append(item)
class CustomNetworkConfig(NetworkConfig):
def items_text(self):
return [item.text for item in self.items]
def expand_section(self, configobj, S=None):
if S is None:
S = list()
S.append(configobj)
for child in configobj.child_objs:
if child in S:
continue
self.expand_section(child, S)
return S
def to_block(self, section):
return "\n".join([item.raw for item in section])
def get_section(self, path):
try:
section = self.get_section_objects(path)
return self.to_block(section)
except ValueError:
return list()
def get_section_objects(self, path):
if not isinstance(path, list):
path = [path]
obj = self.get_object(path)
if not obj:
raise ValueError("path does not exist in config")
return self.expand_section(obj)
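A small sketch of the typical diff workflow with NetworkConfig, using a made-up two-line IOS-style configuration:

running = NetworkConfig(indent=1, contents="interface Gi0/1\n description old\n")
candidate = NetworkConfig(indent=1, contents="interface Gi0/1\n description new\n")
updates = candidate.difference(running, match="line")
print(dumps(updates, "commands"))  # -> "description new"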

@ -0,0 +1,162 @@
#
# -*- coding: utf-8 -*-
# Copyright 2019 Red Hat
# GNU General Public License v3.0+
# (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
"""
The facts base class.
This contains methods common to all facts subsets.
"""
from ansible_collections.ansible.netcommon.plugins.module_utils.network.common.network import (
get_resource_connection,
)
from ansible.module_utils.six import iteritems
class FactsBase(object):
"""
The facts base class
"""
def __init__(self, module):
self._module = module
self._warnings = []
self._gather_subset = module.params.get("gather_subset")
self._gather_network_resources = module.params.get(
"gather_network_resources"
)
self._connection = None
if module.params.get("state") not in ["rendered", "parsed"]:
self._connection = get_resource_connection(module)
self.ansible_facts = {"ansible_network_resources": {}}
self.ansible_facts["ansible_net_gather_network_resources"] = list()
self.ansible_facts["ansible_net_gather_subset"] = list()
if not self._gather_subset:
self._gather_subset = ["!config"]
if not self._gather_network_resources:
self._gather_network_resources = ["!all"]
def gen_runable(self, subsets, valid_subsets, resource_facts=False):
""" Generate the runable subset
:param module: The module instance
:param subsets: The provided subsets
:param valid_subsets: The valid subsets
:param resource_facts: A boolean flag
:rtype: list
:returns: The runable subsets
"""
runable_subsets = set()
exclude_subsets = set()
minimal_gather_subset = set()
if not resource_facts:
minimal_gather_subset = frozenset(["default"])
for subset in subsets:
if subset == "all":
runable_subsets.update(valid_subsets)
continue
if subset == "min" and minimal_gather_subset:
runable_subsets.update(minimal_gather_subset)
continue
if subset.startswith("!"):
subset = subset[1:]
if subset == "min":
exclude_subsets.update(minimal_gather_subset)
continue
if subset == "all":
exclude_subsets.update(
valid_subsets - minimal_gather_subset
)
continue
exclude = True
else:
exclude = False
if subset not in valid_subsets:
self._module.fail_json(
msg="Subset must be one of [%s], got %s"
% (
", ".join(sorted([item for item in valid_subsets])),
subset,
)
)
if exclude:
exclude_subsets.add(subset)
else:
runable_subsets.add(subset)
if not runable_subsets:
runable_subsets.update(valid_subsets)
runable_subsets.difference_update(exclude_subsets)
return runable_subsets
def get_network_resources_facts(
self, facts_resource_obj_map, resource_facts_type=None, data=None
):
"""
:param facts_resource_obj_map: map of network resource name to its facts class
:param resource_facts_type: the network resource subsets to collect
:param data: previously collected configuration
:return:
"""
if not resource_facts_type:
resource_facts_type = self._gather_network_resources
restorun_subsets = self.gen_runable(
resource_facts_type,
frozenset(facts_resource_obj_map.keys()),
resource_facts=True,
)
if restorun_subsets:
self.ansible_facts["ansible_net_gather_network_resources"] = list(
restorun_subsets
)
instances = list()
for key in restorun_subsets:
fact_cls_obj = facts_resource_obj_map.get(key)
if fact_cls_obj:
instances.append(fact_cls_obj(self._module))
else:
self._warnings.extend(
[
"network resource fact gathering for '%s' is not supported"
% key
]
)
for inst in instances:
inst.populate_facts(self._connection, self.ansible_facts, data)
def get_network_legacy_facts(
self, fact_legacy_obj_map, legacy_facts_type=None
):
if not legacy_facts_type:
legacy_facts_type = self._gather_subset
runable_subsets = self.gen_runable(
legacy_facts_type, frozenset(fact_legacy_obj_map.keys())
)
if runable_subsets:
facts = dict()
# the default subset should always be returned with legacy facts subsets
if "default" not in runable_subsets:
runable_subsets.add("default")
self.ansible_facts["ansible_net_gather_subset"] = list(
runable_subsets
)
instances = list()
for key in runable_subsets:
instances.append(fact_legacy_obj_map[key](self._module))
for inst in instances:
inst.populate()
facts.update(inst.facts)
self._warnings.extend(inst.warnings)
for key, value in iteritems(facts):
key = "ansible_net_%s" % key
self.ansible_facts[key] = value
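Platform collections usually wire this base class up through a thin Facts wrapper; a skeletal sketch, where the subset maps are placeholders:

class Facts(FactsBase):
    """Hypothetical platform facts entry point."""

    FACT_LEGACY_SUBSETS = {}    # e.g. {"default": Default, "config": Config}
    FACT_RESOURCE_SUBSETS = {}  # e.g. {"interfaces": InterfacesFacts}

    def get_facts(self, legacy_facts_type=None, resource_facts_type=None, data=None):
        if self.FACT_RESOURCE_SUBSETS:
            self.get_network_resources_facts(
                self.FACT_RESOURCE_SUBSETS, resource_facts_type, data
            )
        if self.FACT_LEGACY_SUBSETS:
            self.get_network_legacy_facts(
                self.FACT_LEGACY_SUBSETS, legacy_facts_type
            )
        return self.ansible_facts, self._warnings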

@ -0,0 +1,179 @@
# This code is part of Ansible, but is an independent component.
# This particular file snippet, and this file snippet only, is BSD licensed.
# Modules you write using this snippet, which is embedded dynamically by Ansible
# still belong to the author of the module, and may assign their own license
# to the complete work.
#
# (c) 2017 Red Hat Inc.
#
# Redistribution and use in source and binary forms, with or without modification,
# are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
# IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
# USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
import sys
from ansible.module_utils._text import to_text, to_bytes
from ansible.module_utils.connection import Connection, ConnectionError
try:
from ncclient.xml_ import NCElement, new_ele, sub_ele
HAS_NCCLIENT = True
except (ImportError, AttributeError):
HAS_NCCLIENT = False
try:
from lxml.etree import Element, fromstring, XMLSyntaxError
except ImportError:
from xml.etree.ElementTree import Element, fromstring
if sys.version_info < (2, 7):
from xml.parsers.expat import ExpatError as XMLSyntaxError
else:
from xml.etree.ElementTree import ParseError as XMLSyntaxError
NS_MAP = {"nc": "urn:ietf:params:xml:ns:netconf:base:1.0"}
def exec_rpc(module, *args, **kwargs):
connection = NetconfConnection(module._socket_path)
return connection.execute_rpc(*args, **kwargs)
class NetconfConnection(Connection):
def __init__(self, socket_path):
super(NetconfConnection, self).__init__(socket_path)
def __rpc__(self, name, *args, **kwargs):
"""Executes the json-rpc and returns the output received
from remote device.
:name: rpc method to be executed over connection plugin that implements jsonrpc 2.0
:args: Ordered list of params passed as arguments to rpc method
:kwargs: Dict of valid key, value pairs passed as arguments to rpc method
For usage, refer to the respective connection plugin docs.
"""
self.check_rc = kwargs.pop("check_rc", True)
self.ignore_warning = kwargs.pop("ignore_warning", True)
response = self._exec_jsonrpc(name, *args, **kwargs)
if "error" in response:
rpc_error = response["error"].get("data")
return self.parse_rpc_error(
to_bytes(rpc_error, errors="surrogate_then_replace")
)
return fromstring(
to_bytes(response["result"], errors="surrogate_then_replace")
)
def parse_rpc_error(self, rpc_error):
if self.check_rc:
try:
error_root = fromstring(rpc_error)
root = Element("root")
root.append(error_root)
error_list = root.findall(".//nc:rpc-error", NS_MAP)
if not error_list:
raise ConnectionError(
to_text(rpc_error, errors="surrogate_then_replace")
)
warnings = []
for error in error_list:
message_ele = error.find("./nc:error-message", NS_MAP)
if message_ele is None:
message_ele = error.find("./nc:error-info", NS_MAP)
message = (
message_ele.text if message_ele is not None else None
)
severity = error.find("./nc:error-severity", NS_MAP).text
if (
severity == "warning"
and self.ignore_warning
and message is not None
):
warnings.append(message)
else:
raise ConnectionError(
to_text(rpc_error, errors="surrogate_then_replace")
)
return warnings
except XMLSyntaxError:
raise ConnectionError(rpc_error)
def transform_reply():
return b"""<xsl:stylesheet version="1.0" xmlns:xsl="http://www.w3.org/1999/XSL/Transform">
<xsl:output method="xml" indent="no"/>
<xsl:template match="/|comment()|processing-instruction()">
<xsl:copy>
<xsl:apply-templates/>
</xsl:copy>
</xsl:template>
<xsl:template match="*">
<xsl:element name="{local-name()}">
<xsl:apply-templates select="@*|node()"/>
</xsl:element>
</xsl:template>
<xsl:template match="@*">
<xsl:attribute name="{local-name()}">
<xsl:value-of select="."/>
</xsl:attribute>
</xsl:template>
</xsl:stylesheet>
"""
# Note: Workaround for ncclient 0.5.3
def remove_namespaces(data):
if not HAS_NCCLIENT:
raise ImportError(
"ncclient is required but does not appear to be installed. "
"It can be installed using `pip install ncclient`"
)
return NCElement(data, transform_reply()).data_xml
def build_root_xml_node(tag):
return new_ele(tag)
def build_child_xml_node(parent, tag, text=None, attrib=None):
element = sub_ele(parent, tag)
if text:
element.text = to_text(text)
if attrib:
element.attrib.update(attrib)
return element
def build_subtree(parent, path):
element = parent
for field in path.split("/"):
sub_element = build_child_xml_node(element, field)
element = sub_element
return element
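The XML helpers above compose small NETCONF payloads; a quick sketch, assuming ncclient is installed (the element names are made up):

root = build_root_xml_node("interfaces")
intf = build_child_xml_node(root, "interface")
build_child_xml_node(intf, "name", "GigabitEthernet0/0")
# slash-separated paths create nested nodes
host = build_subtree(build_root_xml_node("config"), "system/host-name")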

@ -0,0 +1,275 @@
# This code is part of Ansible, but is an independent component.
# This particular file snippet, and this file snippet only, is BSD licensed.
# Modules you write using this snippet, which is embedded dynamically by Ansible
# still belong to the author of the module, and may assign their own license
# to the complete work.
#
# Copyright (c) 2015 Peter Sprygada, <psprygada@ansible.com>
#
# Redistribution and use in source and binary forms, with or without modification,
# are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
# IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
# USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import traceback
import json
from ansible.module_utils._text import to_text, to_native
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.basic import env_fallback
from ansible.module_utils.connection import Connection, ConnectionError
from ansible_collections.ansible.netcommon.plugins.module_utils.network.common.netconf import (
NetconfConnection,
)
from ansible_collections.ansible.netcommon.plugins.module_utils.network.common.parsing import (
Cli,
)
from ansible.module_utils.six import iteritems
NET_TRANSPORT_ARGS = dict(
host=dict(required=True),
port=dict(type="int"),
username=dict(fallback=(env_fallback, ["ANSIBLE_NET_USERNAME"])),
password=dict(
no_log=True, fallback=(env_fallback, ["ANSIBLE_NET_PASSWORD"])
),
ssh_keyfile=dict(
fallback=(env_fallback, ["ANSIBLE_NET_SSH_KEYFILE"]), type="path"
),
authorize=dict(
default=False,
fallback=(env_fallback, ["ANSIBLE_NET_AUTHORIZE"]),
type="bool",
),
auth_pass=dict(
no_log=True, fallback=(env_fallback, ["ANSIBLE_NET_AUTH_PASS"])
),
provider=dict(type="dict", no_log=True),
transport=dict(choices=list()),
timeout=dict(default=10, type="int"),
)
NET_CONNECTION_ARGS = dict()
NET_CONNECTIONS = dict()
def _transitional_argument_spec():
argument_spec = {}
for key, value in iteritems(NET_TRANSPORT_ARGS):
value["required"] = False
argument_spec[key] = value
return argument_spec
def to_list(val):
if isinstance(val, (list, tuple)):
return list(val)
elif val is not None:
return [val]
else:
return list()
class ModuleStub(object):
def __init__(self, argument_spec, fail_json):
self.params = dict()
for key, value in argument_spec.items():
self.params[key] = value.get("default")
self.fail_json = fail_json
class NetworkError(Exception):
def __init__(self, msg, **kwargs):
super(NetworkError, self).__init__(msg)
self.kwargs = kwargs
class Config(object):
def __init__(self, connection):
self.connection = connection
def __call__(self, commands, **kwargs):
lines = to_list(commands)
return self.connection.configure(lines, **kwargs)
def load_config(self, commands, **kwargs):
commands = to_list(commands)
return self.connection.load_config(commands, **kwargs)
def get_config(self, **kwargs):
return self.connection.get_config(**kwargs)
def save_config(self):
return self.connection.save_config()
class NetworkModule(AnsibleModule):
def __init__(self, *args, **kwargs):
connect_on_load = kwargs.pop("connect_on_load", True)
argument_spec = NET_TRANSPORT_ARGS.copy()
argument_spec["transport"]["choices"] = NET_CONNECTIONS.keys()
argument_spec.update(NET_CONNECTION_ARGS.copy())
if kwargs.get("argument_spec"):
argument_spec.update(kwargs["argument_spec"])
kwargs["argument_spec"] = argument_spec
super(NetworkModule, self).__init__(*args, **kwargs)
self.connection = None
self._cli = None
self._config = None
try:
transport = self.params["transport"] or "__default__"
cls = NET_CONNECTIONS[transport]
self.connection = cls()
except KeyError:
self.fail_json(
msg="Unknown transport or no default transport specified"
)
except (TypeError, NetworkError) as exc:
self.fail_json(
msg=to_native(exc), exception=traceback.format_exc()
)
if connect_on_load:
self.connect()
@property
def cli(self):
if not self.connected:
self.connect()
if self._cli:
return self._cli
self._cli = Cli(self.connection)
return self._cli
@property
def config(self):
if not self.connected:
self.connect()
if self._config:
return self._config
self._config = Config(self.connection)
return self._config
@property
def connected(self):
return self.connection._connected
def _load_params(self):
super(NetworkModule, self)._load_params()
provider = self.params.get("provider") or dict()
for key, value in provider.items():
for args in [NET_TRANSPORT_ARGS, NET_CONNECTION_ARGS]:
if key in args:
if self.params.get(key) is None and value is not None:
self.params[key] = value
def connect(self):
try:
if not self.connected:
self.connection.connect(self.params)
if self.params["authorize"]:
self.connection.authorize(self.params)
self.log(
"connected to %s:%s using %s"
% (
self.params["host"],
self.params["port"],
self.params["transport"],
)
)
except NetworkError as exc:
self.fail_json(
msg=to_native(exc), exception=traceback.format_exc()
)
def disconnect(self):
try:
if self.connected:
self.connection.disconnect()
self.log("disconnected from %s" % self.params["host"])
except NetworkError as exc:
self.fail_json(
msg=to_native(exc), exception=traceback.format_exc()
)
def register_transport(transport, default=False):
def register(cls):
NET_CONNECTIONS[transport] = cls
if default:
NET_CONNECTIONS["__default__"] = cls
return cls
return register
def add_argument(key, value):
NET_CONNECTION_ARGS[key] = value
def get_resource_connection(module):
if hasattr(module, "_connection"):
return module._connection
capabilities = get_capabilities(module)
network_api = capabilities.get("network_api")
if network_api in ("cliconf", "nxapi", "eapi", "exosapi"):
module._connection = Connection(module._socket_path)
elif network_api == "netconf":
module._connection = NetconfConnection(module._socket_path)
elif network_api == "local":
# This isn't supported, but we shouldn't fail here.
# Set the connection to a fake connection so it fails sensibly.
module._connection = LocalResourceConnection(module)
else:
module.fail_json(
msg="Invalid connection type {0!s}".format(network_api)
)
return module._connection
def get_capabilities(module):
if hasattr(module, "capabilities"):
return module._capabilities
try:
capabilities = Connection(module._socket_path).get_capabilities()
except ConnectionError as exc:
module.fail_json(msg=to_text(exc, errors="surrogate_then_replace"))
except AssertionError:
# No socket_path, connection most likely local.
return dict(network_api="local")
module._capabilities = json.loads(capabilities)
return module._capabilities
class LocalResourceConnection:
def __init__(self, module):
self.module = module
def get(self, *args, **kwargs):
self.module.fail_json(
msg="Network resource modules not supported over local connection."
)
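A rough sketch of how a module body typically uses these helpers once it has been invoked over a persistent network connection; the cliconf branch shown is only one possibility.

def run(module):
    capabilities = get_capabilities(module)
    connection = get_resource_connection(module)
    if capabilities.get("network_api") == "cliconf":
        # drive the device through the proxied cliconf methods,
        # e.g. connection.get(command="show running-config")
        pass
    return connection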

@ -0,0 +1,316 @@
# This code is part of Ansible, but is an independent component.
# This particular file snippet, and this file snippet only, is BSD licensed.
# Modules you write using this snippet, which is embedded dynamically by Ansible
# still belong to the author of the module, and may assign their own license
# to the complete work.
#
# Copyright (c) 2015 Peter Sprygada, <psprygada@ansible.com>
#
# Redistribution and use in source and binary forms, with or without modification,
# are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
# IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
# USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import re
import shlex
import time
from ansible.module_utils.parsing.convert_bool import (
BOOLEANS_TRUE,
BOOLEANS_FALSE,
)
from ansible.module_utils.six import string_types, text_type
from ansible.module_utils.six.moves import zip
def to_list(val):
if isinstance(val, (list, tuple)):
return list(val)
elif val is not None:
return [val]
else:
return list()
class FailedConditionsError(Exception):
def __init__(self, msg, failed_conditions):
super(FailedConditionsError, self).__init__(msg)
self.failed_conditions = failed_conditions
class FailedConditionalError(Exception):
def __init__(self, msg, failed_conditional):
super(FailedConditionalError, self).__init__(msg)
self.failed_conditional = failed_conditional
class AddCommandError(Exception):
def __init__(self, msg, command):
super(AddCommandError, self).__init__(msg)
self.command = command
class AddConditionError(Exception):
def __init__(self, msg, condition):
super(AddConditionError, self).__init__(msg)
self.condition = condition
class Cli(object):
def __init__(self, connection):
self.connection = connection
self.default_output = connection.default_output or "text"
self._commands = list()
@property
def commands(self):
return [str(c) for c in self._commands]
def __call__(self, commands, output=None):
objects = list()
for cmd in to_list(commands):
objects.append(self.to_command(cmd, output))
return self.connection.run_commands(objects)
def to_command(
self, command, output=None, prompt=None, response=None, **kwargs
):
output = output or self.default_output
if isinstance(command, Command):
return command
if isinstance(prompt, string_types):
prompt = re.compile(re.escape(prompt))
return Command(
command, output, prompt=prompt, response=response, **kwargs
)
def add_commands(self, commands, output=None, **kwargs):
for cmd in commands:
self._commands.append(self.to_command(cmd, output, **kwargs))
def run_commands(self):
responses = self.connection.run_commands(self._commands)
for resp, cmd in zip(responses, self._commands):
cmd.response = resp
# wipe out the commands list to avoid issues if additional
# commands are executed later
self._commands = list()
return responses
class Command(object):
def __init__(
self, command, output=None, prompt=None, response=None, **kwargs
):
self.command = command
self.output = output
self.command_string = command
self.prompt = prompt
self.response = response
self.args = kwargs
def __str__(self):
return self.command_string
class CommandRunner(object):
def __init__(self, module):
self.module = module
self.items = list()
self.conditionals = set()
self.commands = list()
self.retries = 10
self.interval = 1
self.match = "all"
self._default_output = module.connection.default_output
def add_command(
self, command, output=None, prompt=None, response=None, **kwargs
):
if command in [str(c) for c in self.commands]:
raise AddCommandError(
"duplicated command detected", command=command
)
cmd = self.module.cli.to_command(
command, output=output, prompt=prompt, response=response, **kwargs
)
self.commands.append(cmd)
def get_command(self, command, output=None):
for cmd in self.commands:
if cmd.command == command:
return cmd.response
raise ValueError("command '%s' not found" % command)
def get_responses(self):
return [cmd.response for cmd in self.commands]
def add_conditional(self, condition):
try:
self.conditionals.add(Conditional(condition))
except AttributeError as exc:
raise AddConditionError(msg=str(exc), condition=condition)
def run(self):
while self.retries > 0:
self.module.cli.add_commands(self.commands)
responses = self.module.cli.run_commands()
for item in list(self.conditionals):
if item(responses):
if self.match == "any":
return item
self.conditionals.remove(item)
if not self.conditionals:
break
time.sleep(self.interval)
self.retries -= 1
else:
failed_conditions = [item.raw for item in self.conditionals]
errmsg = (
"One or more conditional statements have not been satisfied"
)
raise FailedConditionsError(errmsg, failed_conditions)
class Conditional(object):
"""Used in command modules to evaluate waitfor conditions
"""
OPERATORS = {
"eq": ["eq", "=="],
"neq": ["neq", "ne", "!="],
"gt": ["gt", ">"],
"ge": ["ge", ">="],
"lt": ["lt", "<"],
"le": ["le", "<="],
"contains": ["contains"],
"matches": ["matches"],
}
def __init__(self, conditional, encoding=None):
self.raw = conditional
self.negate = False
try:
components = shlex.split(conditional)
key, val = components[0], components[-1]
op_components = components[1:-1]
if "not" in op_components:
self.negate = True
op_components.pop(op_components.index("not"))
op = op_components[0]
except ValueError:
raise ValueError("failed to parse conditional")
self.key = key
self.func = self._func(op)
self.value = self._cast_value(val)
def __call__(self, data):
value = self.get_value(dict(result=data))
if not self.negate:
return self.func(value)
else:
return not self.func(value)
def _cast_value(self, value):
if value in BOOLEANS_TRUE:
return True
elif value in BOOLEANS_FALSE:
return False
elif re.match(r"^\d+\.d+$", value):
return float(value)
elif re.match(r"^\d+$", value):
return int(value)
else:
return text_type(value)
def _func(self, oper):
for func, operators in self.OPERATORS.items():
if oper in operators:
return getattr(self, func)
raise AttributeError("unknown operator: %s" % oper)
def get_value(self, result):
try:
return self.get_json(result)
except (IndexError, TypeError, AttributeError):
msg = "unable to apply conditional to result"
raise FailedConditionalError(msg, self.raw)
def get_json(self, result):
string = re.sub(r"\[[\'|\"]", ".", self.key)
string = re.sub(r"[\'|\"]\]", ".", string)
parts = re.split(r"\.(?=[^\]]*(?:\[|$))", string)
for part in parts:
match = re.findall(r"\[(\S+?)\]", part)
if match:
key = part[: part.find("[")]
result = result[key]
for m in match:
try:
m = int(m)
except ValueError:
m = str(m)
result = result[m]
else:
result = result.get(part)
return result
def number(self, value):
if "." in str(value):
return float(value)
else:
return int(value)
def eq(self, value):
return value == self.value
def neq(self, value):
return value != self.value
def gt(self, value):
return self.number(value) > self.value
def ge(self, value):
return self.number(value) >= self.value
def lt(self, value):
return self.number(value) < self.value
def le(self, value):
return self.number(value) <= self.value
def contains(self, value):
return str(self.value) in value
def matches(self, value):
match = re.search(self.value, value, re.M)
return match is not None
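# Illustrative sketch, not part of the original file: a Conditional parses a
# "<key> <op> <value>" expression and evaluates it against command output.
#
#   cond = Conditional("result[0] contains Loopback999")
#   cond(["interface Loopback999 is up"])      # -> True
#   neg = Conditional("result[0] not contains Loopback888")
#   neg(["interface Loopback999 is up"])       # -> True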

@ -0,0 +1,686 @@
# This code is part of Ansible, but is an independent component.
# This particular file snippet, and this file snippet only, is BSD licensed.
# Modules you write using this snippet, which is embedded dynamically by Ansible
# still belong to the author of the module, and may assign their own license
# to the complete work.
#
# (c) 2016 Red Hat Inc.
#
# Redistribution and use in source and binary forms, with or without modification,
# are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
# IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
# USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# Networking tools for network modules only
import re
import ast
import operator
import socket
import json
from itertools import chain
from ansible.module_utils._text import to_text, to_bytes
from ansible.module_utils.common._collections_compat import Mapping
from ansible.module_utils.six import iteritems, string_types
from ansible.module_utils import basic
from ansible.module_utils.parsing.convert_bool import boolean
# Backwards compatibility for 3rd party modules
# TODO(pabelanger): With move to ansible.netcommon, we should clean this code
# up and have modules import directly themselves.
from ansible.module_utils.common.network import ( # noqa: F401
to_bits,
is_netmask,
is_masklen,
to_netmask,
to_masklen,
to_subnet,
to_ipv6_network,
VALID_MASKS,
)
try:
from jinja2 import Environment, StrictUndefined
from jinja2.exceptions import UndefinedError
HAS_JINJA2 = True
except ImportError:
HAS_JINJA2 = False
OPERATORS = frozenset(["ge", "gt", "eq", "neq", "lt", "le"])
ALIASES = frozenset(
[("min", "ge"), ("max", "le"), ("exactly", "eq"), ("neq", "ne")]
)
def to_list(val):
if isinstance(val, (list, tuple, set)):
return list(val)
elif val is not None:
return [val]
else:
return list()
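# Illustrative sketch, not part of the original file: to_list() normalizes
# scalars, sequences and None into a plain list.
#
#   to_list("show version")   # -> ["show version"]
#   to_list(("a", "b"))       # -> ["a", "b"]
#   to_list(None)             # -> []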
def to_lines(stdout):
for item in stdout:
if isinstance(item, string_types):
item = to_text(item).split("\n")
yield item
def transform_commands(module):
transform = ComplexList(
dict(
command=dict(key=True),
output=dict(),
prompt=dict(type="list"),
answer=dict(type="list"),
newline=dict(type="bool", default=True),
sendonly=dict(type="bool", default=False),
check_all=dict(type="bool", default=False),
),
module,
)
return transform(module.params["commands"])
def sort_list(val):
if isinstance(val, list):
return sorted(val)
return val
class Entity(object):
"""Transforms a dict to with an argument spec
This class will take a dict and apply an Ansible argument spec to the
values. The resulting dict will contain all of the keys in the param
with appropriate values set.
Example::
argument_spec = dict(
command=dict(key=True),
display=dict(default='text', choices=['text', 'json']),
validate=dict(type='bool')
)
transform = Entity(module, argument_spec)
value = dict(command='foo')
result = transform(value)
print(result)
{'command': 'foo', 'display': 'text', 'validate': None}
Supported argument spec:
* key - specifies how to map a single value to a dict
* read_from - read and apply the argument_spec from the module
* required - a value is required
* type - type of value (uses AnsibleModule type checker)
* fallback - implements fallback function
* choices - set of valid options
* default - default value
"""
def __init__(
self, module, attrs=None, args=None, keys=None, from_argspec=False
):
args = [] if args is None else args
self._attributes = attrs or {}
self._module = module
for arg in args:
self._attributes[arg] = dict()
if from_argspec:
self._attributes[arg]["read_from"] = arg
if keys and arg in keys:
self._attributes[arg]["key"] = True
self.attr_names = frozenset(self._attributes.keys())
_has_key = False
for name, attr in iteritems(self._attributes):
if attr.get("read_from"):
if attr["read_from"] not in self._module.argument_spec:
module.fail_json(
msg="argument %s does not exist" % attr["read_from"]
)
spec = self._module.argument_spec.get(attr["read_from"])
for key, value in iteritems(spec):
if key not in attr:
attr[key] = value
if attr.get("key"):
if _has_key:
module.fail_json(msg="only one key value can be specified")
_has_key = True
attr["required"] = True
def serialize(self):
return self._attributes
def to_dict(self, value):
obj = {}
for name, attr in iteritems(self._attributes):
if attr.get("key"):
obj[name] = value
else:
obj[name] = attr.get("default")
return obj
def __call__(self, value, strict=True):
if not isinstance(value, dict):
value = self.to_dict(value)
if strict:
unknown = set(value).difference(self.attr_names)
if unknown:
self._module.fail_json(
msg="invalid keys: %s" % ",".join(unknown)
)
for name, attr in iteritems(self._attributes):
if value.get(name) is None:
value[name] = attr.get("default")
if attr.get("fallback") and not value.get(name):
fallback = attr.get("fallback", (None,))
fallback_strategy = fallback[0]
fallback_args = []
fallback_kwargs = {}
if fallback_strategy is not None:
for item in fallback[1:]:
if isinstance(item, dict):
fallback_kwargs = item
else:
fallback_args = item
try:
value[name] = fallback_strategy(
*fallback_args, **fallback_kwargs
)
except basic.AnsibleFallbackNotFound:
continue
if attr.get("required") and value.get(name) is None:
self._module.fail_json(
msg="missing required attribute %s" % name
)
if "choices" in attr:
if value[name] not in attr["choices"]:
self._module.fail_json(
msg="%s must be one of %s, got %s"
% (name, ", ".join(attr["choices"]), value[name])
)
if value[name] is not None:
value_type = attr.get("type", "str")
type_checker = self._module._CHECK_ARGUMENT_TYPES_DISPATCHER[
value_type
]
type_checker(value[name])
elif value.get(name):
value[name] = self._module.params[name]
return value
class EntityCollection(Entity):
"""Extends ```Entity``` to handle a list of dicts """
def __call__(self, iterable, strict=True):
if iterable is None:
iterable = [
super(EntityCollection, self).__call__(
self._module.params, strict
)
]
if not isinstance(iterable, (list, tuple)):
self._module.fail_json(msg="value must be an iterable")
return [
(super(EntityCollection, self).__call__(i, strict))
for i in iterable
]
# these two are for backwards compatibility and can be removed once all of the
# modules that use them are updated
class ComplexDict(Entity):
def __init__(self, attrs, module, *args, **kwargs):
super(ComplexDict, self).__init__(module, attrs, *args, **kwargs)
class ComplexList(EntityCollection):
def __init__(self, attrs, module, *args, **kwargs):
super(ComplexList, self).__init__(module, attrs, *args, **kwargs)
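# Illustrative sketch, not part of the original file (assumes "module" is an
# AnsibleModule instance): the ComplexList built in transform_commands() maps
# bare strings onto the key attribute and fills in the declared defaults, e.g.
#
#   transform(["show version"])
#   # -> [{"command": "show version", "output": None, "prompt": None,
#   #      "answer": None, "newline": True, "sendonly": False,
#   #      "check_all": False}]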
def dict_diff(base, comparable):
""" Generate a dict object of differences
This function will compare two dict objects and return the difference
between them as a dict object. For scalar values, the key will reflect
the updated value. If the key does not exist in `comparable`, then no
key will be returned. For lists, the value in comparable will wholly replace
the value in base for the key. For dicts, the returned value will only
return keys that are different.
:param base: dict object to base the diff on
:param comparable: dict object to compare against base
:returns: new dict object with differences
"""
if not isinstance(base, dict):
raise AssertionError("`base` must be of type <dict>")
if not isinstance(comparable, dict):
if comparable is None:
comparable = dict()
else:
raise AssertionError("`comparable` must be of type <dict>")
updates = dict()
for key, value in iteritems(base):
if isinstance(value, dict):
item = comparable.get(key)
if item is not None:
sub_diff = dict_diff(value, comparable[key])
if sub_diff:
updates[key] = sub_diff
else:
comparable_value = comparable.get(key)
if comparable_value is not None:
if sort_list(base[key]) != sort_list(comparable_value):
updates[key] = comparable_value
for key in set(comparable.keys()).difference(base.keys()):
updates[key] = comparable.get(key)
return updates
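# Illustrative sketch, not part of the original file: dict_diff() reports
# changed scalars, recursive differences for dicts, and keys that only exist
# in comparable.
#
#   dict_diff(
#       {"hostname": "r1", "ifc": {"Gi1": {"mtu": 1500}}, "dns": ["8.8.8.8"]},
#       {"hostname": "r2", "ifc": {"Gi1": {"mtu": 9000}}, "dns": ["8.8.8.8"],
#        "domain": "example.com"},
#   )
#   # -> {"hostname": "r2", "ifc": {"Gi1": {"mtu": 9000}},
#   #     "domain": "example.com"}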
def dict_merge(base, other):
""" Return a new dict object that combines base and other
This will create a new dict object that is a combination of the key/value
pairs from base and other. When both keys exist, the value will be
selected from other. If the value is a list object, the two lists will
be combined and duplicate entries removed.
:param base: dict object to serve as base
:param other: dict object to combine with base
:returns: new combined dict object
"""
if not isinstance(base, dict):
raise AssertionError("`base` must be of type <dict>")
if not isinstance(other, dict):
raise AssertionError("`other` must be of type <dict>")
combined = dict()
for key, value in iteritems(base):
if isinstance(value, dict):
if key in other:
item = other.get(key)
if item is not None:
if isinstance(other[key], Mapping):
combined[key] = dict_merge(value, other[key])
else:
combined[key] = other[key]
else:
combined[key] = item
else:
combined[key] = value
elif isinstance(value, list):
if key in other:
item = other.get(key)
if item is not None:
try:
combined[key] = list(set(chain(value, item)))
except TypeError:
value.extend([i for i in item if i not in value])
combined[key] = value
else:
combined[key] = item
else:
combined[key] = value
else:
if key in other:
other_value = other.get(key)
if other_value is not None:
if sort_list(base[key]) != sort_list(other_value):
combined[key] = other_value
else:
combined[key] = value
else:
combined[key] = other_value
else:
combined[key] = value
for key in set(other.keys()).difference(base.keys()):
combined[key] = other.get(key)
return combined
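# Illustrative sketch, not part of the original file: dict_merge() prefers
# values from "other", merges nested dicts, and deduplicates combined lists.
#
#   dict_merge(
#       {"a": 1, "b": [1, 2], "c": {"x": 1}},
#       {"a": 2, "b": [2, 3], "c": {"y": 2}, "d": 4},
#   )
#   # -> {"a": 2, "b": [1, 2, 3] (order not guaranteed),
#   #     "c": {"x": 1, "y": 2}, "d": 4}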
def param_list_to_dict(param_list, unique_key="name", remove_key=True):
"""Rotates a list of dictionaries to be a dictionary of dictionaries.
:param param_list: The aforementioned list of dictionaries
:param unique_key: The name of a key which is present and unique in all of param_list's dictionaries. The value
behind this key will be the key each dictionary can be found at in the new root dictionary
:param remove_key: If True, remove unique_key from the individual dictionaries before returning.
"""
param_dict = {}
for params in param_list:
params = params.copy()
if remove_key:
name = params.pop(unique_key)
else:
name = params.get(unique_key)
param_dict[name] = params
return param_dict
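# Illustrative sketch, not part of the original file:
#
#   param_list_to_dict([{"name": "Gi1", "mtu": 1500},
#                       {"name": "Gi2", "mtu": 9000}])
#   # -> {"Gi1": {"mtu": 1500}, "Gi2": {"mtu": 9000}}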
def conditional(expr, val, cast=None):
match = re.match(r"^(.+)\((.+)\)$", str(expr), re.I)
if match:
op, arg = match.groups()
else:
op = "eq"
if " " in str(expr):
raise AssertionError("invalid expression: cannot contain spaces")
arg = expr
if cast is None and val is not None:
arg = type(val)(arg)
elif callable(cast):
arg = cast(arg)
val = cast(val)
op = next((oper for alias, oper in ALIASES if op == alias), op)
if not hasattr(operator, op) and op not in OPERATORS:
raise ValueError("unknown operator: %s" % op)
func = getattr(operator, op)
return func(val, arg)
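# Illustrative sketch, not part of the original file: conditional() applies an
# "op(arg)" expression (or a bare value, implying equality) to val.
#
#   conditional("ge(2)", 3)       # -> True   (3 >= 2)
#   conditional("min(10)", 2)     # -> False  ("min" is an alias for "ge")
#   conditional("up", "up")       # -> True   (bare value implies "eq")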
def ternary(value, true_val, false_val):
""" value ? true_val : false_val """
if value:
return true_val
else:
return false_val
def remove_default_spec(spec):
for item in spec:
if "default" in spec[item]:
del spec[item]["default"]
def validate_ip_address(address):
try:
socket.inet_aton(address)
except socket.error:
return False
return address.count(".") == 3
def validate_ip_v6_address(address):
try:
socket.inet_pton(socket.AF_INET6, address)
except socket.error:
return False
return True
def validate_prefix(prefix):
if prefix and not 0 <= int(prefix) <= 32:
return False
return True
def load_provider(spec, args):
provider = args.get("provider") or {}
for key, value in iteritems(spec):
if key not in provider:
if "fallback" in value:
provider[key] = _fallback(value["fallback"])
elif "default" in value:
provider[key] = value["default"]
else:
provider[key] = None
if "authorize" in provider:
# Coerce authorize to a boolean if a string has somehow snuck in.
provider["authorize"] = boolean(provider["authorize"] or False)
args["provider"] = provider
return provider
def _fallback(fallback):
strategy = fallback[0]
args = []
kwargs = {}
for item in fallback[1:]:
if isinstance(item, dict):
kwargs = item
else:
args = item
try:
return strategy(*args, **kwargs)
except basic.AnsibleFallbackNotFound:
pass
def generate_dict(spec):
"""
Generate dictionary which is in sync with argspec
:param spec: A dictionary that is the argspec of the module
:rtype: A dictionary
:returns: A dictionary in sync with argspec with default value
"""
obj = {}
if not spec:
return obj
for key, val in iteritems(spec):
if "default" in val:
dct = {key: val["default"]}
elif "type" in val and val["type"] == "dict":
dct = {key: generate_dict(val["options"])}
else:
dct = {key: None}
obj.update(dct)
return obj
def parse_conf_arg(cfg, arg):
"""
Parse config based on argument
:param cfg: A text string which is a line of configuration.
:param arg: A text string which is to be matched.
:rtype: A text string
:returns: A text string if match is found
"""
match = re.search(r"%s (.+)(\n|$)" % arg, cfg, re.M)
if match:
result = match.group(1).strip()
else:
result = None
return result
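# Illustrative sketch, not part of the original file:
#
#   cfg = "interface GigabitEthernet0/1\n description uplink\n mtu 9000\n"
#   parse_conf_arg(cfg, "description")   # -> "uplink"
#   parse_conf_arg(cfg, "shutdown")      # -> None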
def parse_conf_cmd_arg(cfg, cmd, res1, res2=None, delete_str="no"):
"""
Parse config based on command
:param cfg: A text string which is a line of configuration.
:param cmd: A text string which is the command to be matched
:param res1: A text string to be returned if the command is present
:param res2: A text string to be returned if the negate command
is present
:param delete_str: A text string to identify the start of the
negate command
:rtype: A text string
:returns: A text string if match is found
"""
match = re.search(r"\n\s+%s(\n|$)" % cmd, cfg)
if match:
return res1
if res2 is not None:
match = re.search(r"\n\s+%s %s(\n|$)" % (delete_str, cmd), cfg)
if match:
return res2
return None
def get_xml_conf_arg(cfg, path, data="text"):
"""
:param cfg: The top level configuration lxml Element tree object
:param path: The relative xpath w.r.t to top level element (cfg)
to be searched in the xml hierarchy
:param data: The type of data to be returned for the matched xml node.
Valid values are text, tag, attrib, with default as text.
:return: Returns the required type for the matched xml node or else None
"""
match = cfg.xpath(path)
if len(match):
if data == "tag":
result = getattr(match[0], "tag")
elif data == "attrib":
result = getattr(match[0], "attrib")
else:
result = getattr(match[0], "text")
else:
result = None
return result
def remove_empties(cfg_dict):
"""
Generate final config dictionary
:param cfg_dict: A dictionary parsed in the facts system
:rtype: A dictionary
:returns: A dictionary by eliminating keys that have null values
"""
final_cfg = {}
if not cfg_dict:
return final_cfg
for key, val in iteritems(cfg_dict):
dct = None
if isinstance(val, dict):
child_val = remove_empties(val)
if child_val:
dct = {key: child_val}
elif (
isinstance(val, list)
and val
and all([isinstance(x, dict) for x in val])
):
child_val = [remove_empties(x) for x in val]
if child_val:
dct = {key: child_val}
elif val not in [None, [], {}, (), ""]:
dct = {key: val}
if dct:
final_cfg.update(dct)
return final_cfg
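# Illustrative sketch, not part of the original file: remove_empties() drops
# keys whose values are None or empty containers, recursing into dicts and
# lists of dicts.
#
#   remove_empties({"hostname": "r1", "domain": None, "servers": [],
#                   "vlans": [{"id": 10, "name": ""}]})
#   # -> {"hostname": "r1", "vlans": [{"id": 10}]}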
def validate_config(spec, data):
"""
Validate the input data against the AnsibleModule spec format
:param spec: Ansible argument spec
:param data: Data to be validated
:return:
"""
params = basic._ANSIBLE_ARGS
basic._ANSIBLE_ARGS = to_bytes(json.dumps({"ANSIBLE_MODULE_ARGS": data}))
validated_data = basic.AnsibleModule(spec).params
basic._ANSIBLE_ARGS = params
return validated_data
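# Illustrative sketch, not part of the original file (the spec below is made
# up for illustration): validate_config() reuses AnsibleModule argument
# handling to type-check an arbitrary dict and fill in defaults.
#
#   spec = dict(hostname=dict(type="str"), mtu=dict(type="int", default=1500))
#   validate_config(spec, {"hostname": "r1"})
#   # -> {"hostname": "r1", "mtu": 1500}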
def search_obj_in_list(name, lst, key="name"):
if not lst:
return None
else:
for item in lst:
if item.get(key) == name:
return item
class Template:
def __init__(self):
if not HAS_JINJA2:
raise ImportError(
"jinja2 is required but does not appear to be installed. "
"It can be installed using `pip install jinja2`"
)
self.env = Environment(undefined=StrictUndefined)
self.env.filters.update({"ternary": ternary})
def __call__(self, value, variables=None, fail_on_undefined=True):
variables = variables or {}
if not self.contains_vars(value):
return value
try:
value = self.env.from_string(value).render(variables)
except UndefinedError:
if not fail_on_undefined:
return None
raise
if value:
try:
return ast.literal_eval(value)
except Exception:
return str(value)
else:
return None
def contains_vars(self, data):
if isinstance(data, string_types):
for marker in (
self.env.block_start_string,
self.env.variable_start_string,
self.env.comment_start_string,
):
if marker in data:
return True
return False
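# Illustrative sketch, not part of the original file (requires jinja2):
#
#   t = Template()
#   t("{{ hostname }}", {"hostname": "r1"})      # -> "r1"
#   t("{{ missing }}", fail_on_undefined=False)  # -> None
#   t("no vars here")                            # returned unchanged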

@ -0,0 +1,147 @@
#
# (c) 2018 Red Hat, Inc.
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
import json
from copy import deepcopy
from contextlib import contextmanager
try:
from lxml.etree import fromstring, tostring
except ImportError:
from xml.etree.ElementTree import fromstring, tostring
from ansible.module_utils._text import to_text, to_bytes
from ansible.module_utils.connection import Connection, ConnectionError
from ansible_collections.ansible.netcommon.plugins.module_utils.network.common.netconf import (
NetconfConnection,
)
IGNORE_XML_ATTRIBUTE = ()
def get_connection(module):
if hasattr(module, "_netconf_connection"):
return module._netconf_connection
capabilities = get_capabilities(module)
network_api = capabilities.get("network_api")
if network_api == "netconf":
module._netconf_connection = NetconfConnection(module._socket_path)
else:
module.fail_json(msg="Invalid connection type %s" % network_api)
return module._netconf_connection
def get_capabilities(module):
if hasattr(module, "_netconf_capabilities"):
return module._netconf_capabilities
capabilities = Connection(module._socket_path).get_capabilities()
module._netconf_capabilities = json.loads(capabilities)
return module._netconf_capabilities
def lock_configuration(module, target=None):
conn = get_connection(module)
return conn.lock(target=target)
def unlock_configuration(module, target=None):
conn = get_connection(module)
return conn.unlock(target=target)
@contextmanager
def locked_config(module, target=None):
try:
lock_configuration(module, target=target)
yield
finally:
unlock_configuration(module, target=target)
def get_config(module, source, filter=None, lock=False):
conn = get_connection(module)
try:
locked = False
if lock:
conn.lock(target=source)
locked = True
response = conn.get_config(source=source, filter=filter)
except ConnectionError as e:
module.fail_json(
msg=to_text(e, errors="surrogate_then_replace").strip()
)
finally:
if locked:
conn.unlock(target=source)
return response
def get(module, filter, lock=False):
conn = get_connection(module)
try:
locked = False
if lock:
conn.lock(target="running")
locked = True
response = conn.get(filter=filter)
except ConnectionError as e:
module.fail_json(
msg=to_text(e, errors="surrogate_then_replace").strip()
)
finally:
if locked:
conn.unlock(target="running")
return response
def dispatch(module, request):
conn = get_connection(module)
try:
response = conn.dispatch(request)
except ConnectionError as e:
module.fail_json(
msg=to_text(e, errors="surrogate_then_replace").strip()
)
return response
def sanitize_xml(data):
tree = fromstring(
to_bytes(deepcopy(data), errors="surrogate_then_replace")
)
for element in tree.getiterator():
# remove attributes
attribute = element.attrib
if attribute:
for key in list(attribute):
if key not in IGNORE_XML_ATTRIBUTE:
attribute.pop(key)
return to_text(tostring(tree), errors="surrogate_then_replace").strip()

@ -0,0 +1,61 @@
# This code is part of Ansible, but is an independent component.
# This particular file snippet, and this file snippet only, is BSD licensed.
# Modules you write using this snippet, which is embedded dynamically by Ansible
# still belong to the author of the module, and may assign their own license
# to the complete work.
#
# (c) 2018 Red Hat Inc.
#
# Redistribution and use in source and binary forms, with or without modification,
# are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
# IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
# USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
from ansible.module_utils.connection import Connection
def get(module, path=None, content=None, fields=None, output="json"):
if path is None:
raise ValueError("path value must be provided")
if content:
path += "?" + "content=%s" % content
if fields:
path += "?" + "field=%s" % fields
accept = None
if output == "xml":
accept = "application/yang-data+xml"
connection = Connection(module._socket_path)
return connection.send_request(
None, path=path, method="GET", accept=accept
)
def edit_config(module, path=None, content=None, method="GET", format="json"):
if path is None:
raise ValueError("path value must be provided")
content_type = None
if format == "xml":
content_type = "application/yang-data+xml"
connection = Connection(module._socket_path)
return connection.send_request(
content, path=path, method=method, content_type=content_type
)

@ -0,0 +1,444 @@
#!/usr/bin/python
# -*- coding: utf-8 -*-
# (c) 2018, Ansible by Red Hat, inc
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {
"metadata_version": "1.1",
"status": ["preview"],
"supported_by": "network",
}
DOCUMENTATION = """module: cli_config
author: Trishna Guha (@trishnaguha)
notes:
- The commands will be returned only for platforms that do not support onbox diff.
The C(--diff) option with the playbook will return the difference in configuration
for devices that have support for onbox diff
short_description: Push text based configuration to network devices over network_cli
description:
- This module provides platform agnostic way of pushing text based configuration to
network devices over network_cli connection plugin.
extends_documentation_fragment:
- ansible.netcommon.network_agnostic
options:
config:
description:
- The config to be pushed to the network device. This argument is mutually exclusive
with C(rollback) and either one of the options should be given as input. The
config should have indentation that the device uses.
type: str
commit:
description:
- The C(commit) argument instructs the module to push the configuration to the
device. This is mapped to module check mode.
type: bool
replace:
description:
- If the C(replace) argument is set to C(yes), it will replace the entire running-config
of the device with the C(config) argument value. For devices that support replacing
running configuration from file on device like NXOS/JUNOS, the C(replace) argument
takes path to the file on the device that will be used for replacing the entire
running-config. The value of C(config) option should be I(None) for such devices.
Nexus 9K devices only support replace. Use I(net_put) or I(nxos_file_copy) in
case of NXOS module to copy the flat file to the remote device and then set
the full path to this argument.
type: str
backup:
description:
- This argument will cause the module to create a full backup of the current running
config from the remote device before any changes are made. If the C(backup_options)
value is not given, the backup file is written to the C(backup) folder in the
playbook root directory or role root directory, if playbook is part of an ansible
role. If the directory does not exist, it is created.
type: bool
default: 'no'
rollback:
description:
- The C(rollback) argument instructs the module to rollback the current configuration
to the identifier specified in the argument. If the specified rollback identifier
does not exist on the remote device, the module will fail. To rollback to the
most recent commit, set the C(rollback) argument to 0. This option is mutually
exclusive with C(config).
commit_comment:
description:
- The C(commit_comment) argument specifies a text string to be used when committing
the configuration. If the C(commit) argument is set to False, this argument
is silently ignored. This argument is only valid for the platforms that support
commit operation with comment.
type: str
defaults:
description:
- The I(defaults) argument will influence how the running-config is collected
from the device. When the value is set to true, the command used to collect
the running-config is appended with the all keyword. When the value is set to
false, the command is issued without the all keyword.
default: 'no'
type: bool
multiline_delimiter:
description:
- This argument is used when pushing a multiline configuration element to the
device. It specifies the character to use as the delimiting character. This
only applies to the configuration action.
type: str
diff_replace:
description:
- Instructs the module on the way to perform the configuration on the device.
If the C(diff_replace) argument is set to I(line) then the modified lines are
pushed to the device in configuration mode. If the argument is set to I(block)
then the entire command block is pushed to the device in configuration mode
if any line is not correct. Note that this parameter will be ignored if the
platform has onbox diff support.
choices:
- line
- block
- config
diff_match:
description:
- Instructs the module on the way to perform the matching of the set of commands
against the current device config. If C(diff_match) is set to I(line), commands
are matched line by line. If C(diff_match) is set to I(strict), command lines
are matched with respect to position. If C(diff_match) is set to I(exact), command
lines must be an equal match. Finally, if C(diff_match) is set to I(none), the
module will not attempt to compare the source configuration with the running
configuration on the remote device. Note that this parameter will be ignored
if the platform has onbox diff support.
choices:
- line
- strict
- exact
- none
diff_ignore_lines:
description:
- Use this argument to specify one or more lines that should be ignored during
the diff. This is used for lines in the configuration that are automatically
updated by the system. This argument takes a list of regular expressions or
exact line matches. Note that this parameter will be ignored if the platform
has onbox diff support.
backup_options:
description:
- This is a dict object containing configurable options related to backup file
path. The value of this option is read only when C(backup) is set to I(yes);
if C(backup) is set to I(no) this option will be silently ignored.
suboptions:
filename:
description:
- The filename to be used to store the backup configuration. If the filename
is not given it will be generated based on the hostname, current time and
date in format defined by <hostname>_config.<current-date>@<current-time>
dir_path:
description:
- This option provides the path ending with directory name in which the backup
configuration file will be stored. If the directory does not exist it will
be first created and the filename is either the value of C(filename) or
default filename as described in C(filename) options description. If the
path value is not given in that case a I(backup) directory will be created
in the current working directory and backup configuration will be copied
in C(filename) within I(backup) directory.
type: path
type: dict
"""
EXAMPLES = """
- name: configure device with config
cli_config:
config: "{{ lookup('template', 'basic/config.j2') }}"
- name: multiline config
cli_config:
config: |
hostname foo
feature nxapi
- name: configure device with config with defaults enabled
cli_config:
config: "{{ lookup('template', 'basic/config.j2') }}"
defaults: yes
- name: Use diff_match
cli_config:
config: "{{ lookup('file', 'interface_config') }}"
diff_match: none
- name: nxos replace config
cli_config:
replace: 'bootflash:nxoscfg'
- name: junos replace config
cli_config:
replace: '/var/home/ansible/junos01.cfg'
- name: commit with comment
cli_config:
config: set system host-name foo
commit_comment: this is a test
- name: configurable backup path
cli_config:
config: "{{ lookup('template', 'basic/config.j2') }}"
backup: yes
backup_options:
filename: backup.cfg
dir_path: /home/user
"""
RETURN = """
commands:
description: The set of commands that will be pushed to the remote device
returned: always
type: list
sample: ['interface Loopback999', 'no shutdown']
backup_path:
description: The full path to the backup file
returned: when backup is yes
type: str
sample: /playbooks/ansible/backup/hostname_config.2016-07-16@22:28:34
"""
import json
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.connection import Connection
from ansible.module_utils._text import to_text
def validate_args(module, device_operations):
"""validate param if it is supported on the platform
"""
feature_list = [
"replace",
"rollback",
"commit_comment",
"defaults",
"multiline_delimiter",
"diff_replace",
"diff_match",
"diff_ignore_lines",
]
for feature in feature_list:
if module.params[feature]:
supports_feature = device_operations.get("supports_%s" % feature)
if supports_feature is None:
module.fail_json(
"This platform does not specify whether %s is supported or not. "
"Please report an issue against this platform's cliconf plugin."
% feature
)
elif not supports_feature:
module.fail_json(
msg="Option %s is not supported on this platform" % feature
)
def run(
module, device_operations, connection, candidate, running, rollback_id
):
result = {}
resp = {}
config_diff = []
banner_diff = {}
replace = module.params["replace"]
commit_comment = module.params["commit_comment"]
multiline_delimiter = module.params["multiline_delimiter"]
diff_replace = module.params["diff_replace"]
diff_match = module.params["diff_match"]
diff_ignore_lines = module.params["diff_ignore_lines"]
commit = not module.check_mode
if replace in ("yes", "true", "True"):
replace = True
elif replace in ("no", "false", "False"):
replace = False
if (
replace is not None
and replace not in [True, False]
and candidate is not None
):
module.fail_json(
msg="Replace value '%s' is a configuration file path already"
" present on the device. Hence 'replace' and 'config' options"
" are mutually exclusive" % replace
)
if rollback_id is not None:
resp = connection.rollback(rollback_id, commit)
if "diff" in resp:
result["changed"] = True
elif device_operations.get("supports_onbox_diff"):
if diff_replace:
module.warn(
"diff_replace is ignored as the device supports onbox diff"
)
if diff_match:
module.warn(
"diff_mattch is ignored as the device supports onbox diff"
)
if diff_ignore_lines:
module.warn(
"diff_ignore_lines is ignored as the device supports onbox diff"
)
if candidate and not isinstance(candidate, list):
candidate = candidate.strip("\n").splitlines()
kwargs = {
"candidate": candidate,
"commit": commit,
"replace": replace,
"comment": commit_comment,
}
resp = connection.edit_config(**kwargs)
if "diff" in resp:
result["changed"] = True
elif device_operations.get("supports_generate_diff"):
kwargs = {"candidate": candidate, "running": running}
if diff_match:
kwargs.update({"diff_match": diff_match})
if diff_replace:
kwargs.update({"diff_replace": diff_replace})
if diff_ignore_lines:
kwargs.update({"diff_ignore_lines": diff_ignore_lines})
diff_response = connection.get_diff(**kwargs)
config_diff = diff_response.get("config_diff")
banner_diff = diff_response.get("banner_diff")
if config_diff:
if isinstance(config_diff, list):
candidate = config_diff
else:
candidate = config_diff.splitlines()
kwargs = {
"candidate": candidate,
"commit": commit,
"replace": replace,
"comment": commit_comment,
}
if commit:
connection.edit_config(**kwargs)
result["changed"] = True
result["commands"] = config_diff.split("\n")
if banner_diff:
candidate = json.dumps(banner_diff)
kwargs = {"candidate": candidate, "commit": commit}
if multiline_delimiter:
kwargs.update({"multiline_delimiter": multiline_delimiter})
if commit:
connection.edit_banner(**kwargs)
result["changed"] = True
if module._diff:
if "diff" in resp:
result["diff"] = {"prepared": resp["diff"]}
else:
diff = ""
if config_diff:
if isinstance(config_diff, list):
diff += "\n".join(config_diff)
else:
diff += config_diff
if banner_diff:
diff += json.dumps(banner_diff)
result["diff"] = {"prepared": diff}
return result
def main():
"""main entry point for execution
"""
backup_spec = dict(filename=dict(), dir_path=dict(type="path"))
argument_spec = dict(
backup=dict(default=False, type="bool"),
backup_options=dict(type="dict", options=backup_spec),
config=dict(type="str"),
commit=dict(type="bool"),
replace=dict(type="str"),
rollback=dict(type="int"),
commit_comment=dict(type="str"),
defaults=dict(default=False, type="bool"),
multiline_delimiter=dict(type="str"),
diff_replace=dict(choices=["line", "block", "config"]),
diff_match=dict(choices=["line", "strict", "exact", "none"]),
diff_ignore_lines=dict(type="list"),
)
mutually_exclusive = [("config", "rollback")]
required_one_of = [["backup", "config", "rollback"]]
module = AnsibleModule(
argument_spec=argument_spec,
mutually_exclusive=mutually_exclusive,
required_one_of=required_one_of,
supports_check_mode=True,
)
result = {"changed": False}
connection = Connection(module._socket_path)
capabilities = module.from_json(connection.get_capabilities())
if capabilities:
device_operations = capabilities.get("device_operations", dict())
validate_args(module, device_operations)
else:
device_operations = dict()
if module.params["defaults"]:
if "get_default_flag" in capabilities.get("rpc"):
flags = connection.get_default_flag()
else:
flags = "all"
else:
flags = []
candidate = module.params["config"]
candidate = (
to_text(candidate, errors="surrogate_then_replace")
if candidate
else None
)
running = connection.get_config(flags=flags)
rollback_id = module.params["rollback"]
if module.params["backup"]:
result["__backup__"] = running
if candidate or rollback_id or module.params["replace"]:
try:
result.update(
run(
module,
device_operations,
connection,
candidate,
running,
rollback_id,
)
)
except Exception as exc:
module.fail_json(msg=to_text(exc))
module.exit_json(**result)
if __name__ == "__main__":
main()

@ -0,0 +1,71 @@
#!/usr/bin/python
# -*- coding: utf-8 -*-
# (c) 2018, Ansible by Red Hat, inc
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {
"metadata_version": "1.1",
"status": ["preview"],
"supported_by": "network",
}
DOCUMENTATION = """module: net_get
author: Deepak Agrawal (@dagrawal)
short_description: Copy a file from a network device to Ansible Controller
description:
- This module provides functionality to copy a file from a network device to the Ansible controller.
extends_documentation_fragment:
- ansible.netcommon.network_agnostic
options:
src:
description:
- Specifies the source file. The path to the source file can either be the full
path on the network device or a relative path as supported by the destination
network device.
required: true
protocol:
description:
- Protocol used to transfer file.
default: scp
choices:
- scp
- sftp
dest:
description:
- Specifies the destination file. The path to the destination file can either
be the full path on the Ansible control host or a relative path from the playbook
or role root directory.
default:
- Same filename as specified in I(src). The path will be playbook root or role
root directory if playbook is part of a role.
requirements:
- scp
notes:
- Some devices need specific configurations to be enabled before scp can work. These
configurations should be pre-configured before using this module, e.g. ios - C(ip scp
server enable).
- User privilege to do scp on the network device should be pre-configured, e.g. ios needs
user privilege 15 by default to allow scp.
- Default destination of source file.
"""
EXAMPLES = """
- name: copy file from the network device to Ansible controller
net_get:
src: running_cfg_ios1.txt
- name: copy file from ios to common location at /tmp
net_get:
src: running_cfg_sw1.txt
dest : /tmp/ios1.txt
"""
RETURN = """
"""

@ -0,0 +1,110 @@
#!/usr/bin/python
# -*- coding: utf-8 -*-
# (c) 2017, Ansible by Red Hat, inc
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {
"metadata_version": "1.1",
"status": ["deprecated"],
"supported_by": "network",
}
DOCUMENTATION = """module: net_logging
author: Ganesh Nalawade (@ganeshrn)
short_description: Manage logging on network devices
description:
- This module provides declarative management of logging on network devices.
deprecated:
removed_in: '2.13'
alternative: Use platform-specific "[netos]_logging" module
why: Updated modules released with more functionality
extends_documentation_fragment:
- ansible.netcommon.network_agnostic
options:
dest:
description:
- Destination of the logs.
choices:
- console
- host
name:
description:
- If the value of C(dest) is I(host), it indicates the host name to be notified.
facility:
description:
- Set logging facility.
level:
description:
- Set logging severity levels.
aggregate:
description: List of logging definitions.
purge:
description:
- Purge logging not defined in the I(aggregate) parameter.
default: false
state:
description:
- State of the logging configuration.
default: present
choices:
- present
- absent
"""
EXAMPLES = """
- name: configure console logging
net_logging:
dest: console
facility: any
level: critical
- name: remove console logging configuration
net_logging:
dest: console
state: absent
- name: configure host logging
net_logging:
dest: host
name: 192.0.2.1
facility: kernel
level: critical
- name: Configure file logging using aggregate
net_logging:
dest: file
aggregate:
- name: test-1
facility: pfe
level: critical
- name: test-2
facility: kernel
level: emergency
- name: Delete file logging using aggregate
net_logging:
dest: file
aggregate:
- name: test-1
facility: pfe
level: critical
- name: test-2
facility: kernel
level: emergency
state: absent
"""
RETURN = """
commands:
description: The list of configuration mode commands to send to the device
returned: always, except for the platforms that use Netconf transport to manage the device.
type: list
sample:
- logging console critical
"""

@ -0,0 +1,82 @@
#!/usr/bin/python
# -*- coding: utf-8 -*-
# (c) 2018, Ansible by Red Hat, inc
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {
"metadata_version": "1.1",
"status": ["preview"],
"supported_by": "network",
}
DOCUMENTATION = """module: net_put
author: Deepak Agrawal (@dagrawal)
short_description: Copy a file from Ansible Controller to a network device
description:
- This module provides functionality to copy a file from the Ansible controller to network
devices.
extends_documentation_fragment:
- ansible.netcommon.network_agnostic
options:
src:
description:
- Specifies the source file. The path to the source file can either be the full
path on the Ansible control host or a relative path from the playbook or role
root directory.
required: true
protocol:
description:
- Protocol used to transfer file.
default: scp
choices:
- scp
- sftp
dest:
description:
- Specifies the destination file. The path to destination file can either be the
full path or relative path as supported by network_os.
default:
- Filename from src and at default directory of user shell on network_os.
required: false
mode:
description:
- Set the file transfer mode. If mode is set to I(text) then the I(src) file will
go through the Jinja2 template engine to replace any vars if present in the src
file. If mode is set to I(binary) then the file will be copied as-is to the destination
device.
default: binary
choices:
- binary
- text
requirements:
- scp
notes:
- Some devices need specific configurations to be enabled before scp can work. These
configurations should be pre-configured before using this module, e.g. ios - C(ip scp
server enable).
- User privilege to do scp on the network device should be pre-configured, e.g. ios needs
user privilege 15 by default to allow scp.
- Default destination of source file.
"""
EXAMPLES = """
- name: copy file from ansible controller to a network device
net_put:
src: running_cfg_ios1.txt
- name: copy file at root dir of flash in slot 3 of sw1(ios)
net_put:
src: running_cfg_sw1.txt
protocol: sftp
dest : flash3:/running_cfg_sw1.txt
"""
RETURN = """
"""

@ -0,0 +1,98 @@
#!/usr/bin/python
# -*- coding: utf-8 -*-
# (c) 2017, Ansible by Red Hat, inc
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {
"metadata_version": "1.1",
"status": ["deprecated"],
"supported_by": "network",
}
DOCUMENTATION = """module: net_static_route
author: Ricardo Carrillo Cruz (@rcarrillocruz)
short_description: Manage static IP routes on network appliances (routers, switches
et. al.)
description:
- This module provides declarative management of static IP routes on network appliances
(routers, switches et. al.).
deprecated:
removed_in: '2.13'
alternative: Use platform-specific "[netos]_static_route" module
why: Updated modules released with more functionality
extends_documentation_fragment:
- ansible.netcommon.network_agnostic
options:
prefix:
description:
- Network prefix of the static route.
required: true
mask:
description:
- Network prefix mask of the static route.
required: true
next_hop:
description:
- Next hop IP of the static route.
required: true
admin_distance:
description:
- Admin distance of the static route.
aggregate:
description: List of static route definitions
purge:
description:
- Purge static routes not defined in the I(aggregate) parameter.
default: false
state:
description:
- State of the static route configuration.
default: present
choices:
- present
- absent
"""
EXAMPLES = """
- name: configure static route
net_static_route:
prefix: 192.168.2.0
mask: 255.255.255.0
next_hop: 10.0.0.1
- name: remove configuration
net_static_route:
prefix: 192.168.2.0
mask: 255.255.255.0
next_hop: 10.0.0.1
state: absent
- name: configure aggregates of static routes
net_static_route:
aggregate:
- { prefix: 192.168.2.0, mask: 255.255.255.0, next_hop: 10.0.0.1 }
- { prefix: 192.168.3.0, mask: 255.255.255.0, next_hop: 10.0.2.1 }
- name: Remove static route collections
net_static_route:
aggregate:
- { prefix: 172.24.1.0/24, next_hop: 192.168.42.64 }
- { prefix: 172.24.3.0/24, next_hop: 192.168.42.64 }
state: absent
"""
RETURN = """
commands:
description: The list of configuration mode commands to send to the device
returned: always
type: list
sample:
- ip route 192.168.2.0/24 10.0.0.1
"""

@ -0,0 +1,70 @@
#
# (c) 2017 Red Hat Inc.
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
from __future__ import absolute_import, division, print_function
__metaclass__ = type
DOCUMENTATION = """author: Ansible Networking Team
netconf: default
short_description: Use default netconf plugin to run standard netconf commands as
per RFC
description:
- This default plugin provides low level abstraction apis for sending and receiving
netconf commands as per Netconf RFC specification.
options:
ncclient_device_handler:
type: str
default: default
description:
- Specifies the ncclient device handler name for network os that support default
netconf implementation as per the Netconf RFC specification. To identify the ncclient
device handler name, refer to the ncclient library documentation.
"""
import json
from ansible.module_utils._text import to_text
from ansible.plugins.netconf import NetconfBase
class Netconf(NetconfBase):
def get_text(self, ele, tag):
try:
return to_text(
ele.find(tag).text, errors="surrogate_then_replace"
).strip()
except AttributeError:
pass
def get_device_info(self):
device_info = dict()
device_info["network_os"] = "default"
return device_info
def get_capabilities(self):
result = dict()
result["rpc"] = self.get_base_rpc()
result["network_api"] = "netconf"
result["device_info"] = self.get_device_info()
result["server_capabilities"] = [c for c in self.m.server_capabilities]
result["client_capabilities"] = [c for c in self.m.client_capabilities]
result["session_id"] = self.m.session_id
result["device_operations"] = self.get_device_operations(
result["server_capabilities"]
)
return json.dumps(result)

@ -0,0 +1,133 @@
#
# (c) 2016 Red Hat Inc.
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
from __future__ import absolute_import, division, print_function
__metaclass__ = type
import sys
import copy
from ansible_collections.ansible.netcommon.plugins.action.network import (
ActionModule as ActionNetworkModule,
)
from ansible_collections.ansible.netcommon.plugins.module_utils.network.common.utils import (
load_provider,
)
from ansible_collections.cisco.ios.plugins.module_utils.network.ios.ios import (
ios_provider_spec,
)
from ansible.utils.display import Display
display = Display()
class ActionModule(ActionNetworkModule):
def run(self, tmp=None, task_vars=None):
del tmp # tmp no longer has any effect
module_name = self._task.action.split(".")[-1]
self._config_module = True if module_name == "ios_config" else False
persistent_connection = self._play_context.connection.split(".")[-1]
warnings = []
if persistent_connection == "network_cli":
provider = self._task.args.get("provider", {})
if any(provider.values()):
display.warning(
"provider is unnecessary when using network_cli and will be ignored"
)
del self._task.args["provider"]
elif self._play_context.connection == "local":
provider = load_provider(ios_provider_spec, self._task.args)
pc = copy.deepcopy(self._play_context)
pc.connection = "ansible.netcommon.network_cli"
pc.network_os = "cisco.ios.ios"
pc.remote_addr = provider["host"] or self._play_context.remote_addr
pc.port = int(provider["port"] or self._play_context.port or 22)
pc.remote_user = (
provider["username"] or self._play_context.connection_user
)
pc.password = provider["password"] or self._play_context.password
pc.private_key_file = (
provider["ssh_keyfile"] or self._play_context.private_key_file
)
pc.become = provider["authorize"] or False
if pc.become:
pc.become_method = "enable"
pc.become_pass = provider["auth_pass"]
connection = self._shared_loader_obj.connection_loader.get(
"ansible.netcommon.persistent",
pc,
sys.stdin,
task_uuid=self._task._uuid,
)
# TODO: Remove below code after ansible minimal is cut out
if connection is None:
pc.connection = "network_cli"
pc.network_os = "ios"
connection = self._shared_loader_obj.connection_loader.get(
"persistent", pc, sys.stdin, task_uuid=self._task._uuid
)
display.vvv(
"using connection plugin %s (was local)" % pc.connection,
pc.remote_addr,
)
command_timeout = (
int(provider["timeout"])
if provider["timeout"]
else connection.get_option("persistent_command_timeout")
)
connection.set_options(
direct={"persistent_command_timeout": command_timeout}
)
socket_path = connection.run()
display.vvvv("socket_path: %s" % socket_path, pc.remote_addr)
if not socket_path:
return {
"failed": True,
"msg": "unable to open shell. Please see: "
+ "https://docs.ansible.com/ansible/network_debug_troubleshooting.html#unable-to-open-shell",
}
task_vars["ansible_socket"] = socket_path
warnings.append(
[
"connection local support for this module is deprecated and will be removed in version 2.14, use connection %s"
% pc.connection
]
)
else:
return {
"failed": True,
"msg": "Connection type %s is not valid for this module"
% self._play_context.connection,
}
result = super(ActionModule, self).run(task_vars=task_vars)
if warnings:
if "warnings" in result:
result["warnings"].extend(warnings)
else:
result["warnings"] = warnings
return result

@ -0,0 +1,465 @@
#
# (c) 2017 Red Hat Inc.
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
from __future__ import absolute_import, division, print_function
__metaclass__ = type
DOCUMENTATION = """
---
author: Ansible Networking Team
cliconf: ios
short_description: Use ios cliconf to run command on Cisco IOS platform
description:
- This ios plugin provides low level abstraction apis for
sending and receiving CLI commands from Cisco IOS network devices.
version_added: "2.4"
"""
import re
import time
import json
from ansible.errors import AnsibleConnectionFailure
from ansible.module_utils._text import to_text
from ansible.module_utils.common._collections_compat import Mapping
from ansible.module_utils.six import iteritems
from ansible_collections.ansible.netcommon.plugins.module_utils.network.common.config import (
NetworkConfig,
dumps,
)
from ansible_collections.ansible.netcommon.plugins.module_utils.network.common.utils import (
to_list,
)
from ansible.plugins.cliconf import CliconfBase, enable_mode
class Cliconf(CliconfBase):
@enable_mode
def get_config(self, source="running", flags=None, format=None):
if source not in ("running", "startup"):
raise ValueError(
"fetching configuration from %s is not supported" % source
)
if format:
raise ValueError(
"'format' value %s is not supported for get_config" % format
)
if not flags:
flags = []
if source == "running":
cmd = "show running-config "
else:
cmd = "show startup-config "
cmd += " ".join(to_list(flags))
cmd = cmd.strip()
return self.send_command(cmd)
def get_diff(
self,
candidate=None,
running=None,
diff_match="line",
diff_ignore_lines=None,
path=None,
diff_replace="line",
):
"""
Generate diff between candidate and running configuration. If the
remote host supports onbox diff capabilities (i.e. supports_onbox_diff), the
candidate and running configurations are not required to be passed as arguments.
If onbox diff capability is not supported, the candidate argument is mandatory
and the running argument is optional.
:param candidate: The configuration which is expected to be present on remote host.
:param running: The base configuration which is used to generate diff.
:param diff_match: Instructs how to match the candidate configuration with current device configuration
Valid values are 'line', 'strict', 'exact', 'none'.
'line' - commands are matched line by line
'strict' - command lines are matched with respect to position
'exact' - command lines must be an equal match
'none' - will not compare the candidate configuration with the running configuration
:param diff_ignore_lines: Use this argument to specify one or more lines that should be
ignored during the diff. This is used for lines in the configuration
that are automatically updated by the system. This argument takes
a list of regular expressions or exact line matches.
:param path: The ordered set of parents that uniquely identify the section or hierarchy
the commands should be checked against. If the parents argument
is omitted, the commands are checked against the set of top
level or global commands.
:param diff_replace: Instructs on the way to perform the configuration on the device.
If the replace argument is set to I(line) then the modified lines are
pushed to the device in configuration mode. If the replace argument is
set to I(block) then the entire command block is pushed to the device in
configuration mode if any line is not correct.
:return: Configuration diff as a dict:
{
'config_diff': '',
'banner_diff': {}
}
"""
diff = {}
device_operations = self.get_device_operations()
option_values = self.get_option_values()
if candidate is None and device_operations["supports_generate_diff"]:
raise ValueError(
"candidate configuration is required to generate diff"
)
if diff_match not in option_values["diff_match"]:
raise ValueError(
"'match' value %s in invalid, valid values are %s"
% (diff_match, ", ".join(option_values["diff_match"]))
)
if diff_replace not in option_values["diff_replace"]:
raise ValueError(
"'replace' value %s in invalid, valid values are %s"
% (diff_replace, ", ".join(option_values["diff_replace"]))
)
# prepare candidate configuration
candidate_obj = NetworkConfig(indent=1)
want_src, want_banners = self._extract_banners(candidate)
candidate_obj.load(want_src)
if running and diff_match != "none":
# running configuration
have_src, have_banners = self._extract_banners(running)
running_obj = NetworkConfig(
indent=1, contents=have_src, ignore_lines=diff_ignore_lines
)
configdiffobjs = candidate_obj.difference(
running_obj, path=path, match=diff_match, replace=diff_replace
)
else:
configdiffobjs = candidate_obj.items
have_banners = {}
diff["config_diff"] = (
dumps(configdiffobjs, "commands") if configdiffobjs else ""
)
banners = self._diff_banners(want_banners, have_banners)
diff["banner_diff"] = banners if banners else {}
return diff
@enable_mode
def edit_config(
self, candidate=None, commit=True, replace=None, comment=None
):
resp = {}
operations = self.get_device_operations()
self.check_edit_config_capability(
operations, candidate, commit, replace, comment
)
results = []
requests = []
if commit:
self.send_command("configure terminal")
for line in to_list(candidate):
if not isinstance(line, Mapping):
line = {"command": line}
cmd = line["command"]
if cmd != "end" and cmd[0] != "!":
results.append(self.send_command(**line))
requests.append(cmd)
self.send_command("end")
else:
raise ValueError("check mode is not supported")
resp["request"] = requests
resp["response"] = results
return resp
def edit_macro(
self, candidate=None, commit=True, replace=None, comment=None
):
"""
ios_config:
lines: "{{ macro_lines }}"
parents: "macro name {{ macro_name }}"
after: '@'
match: line
replace: block
"""
resp = {}
operations = self.get_device_operations()
self.check_edit_config_capability(
operations, candidate, commit, replace, comment
)
results = []
requests = []
if commit:
commands = ""
self.send_command("config terminal")
time.sleep(0.1)
# first item: macro command
commands += candidate.pop(0) + "\n"
multiline_delimiter = candidate.pop(-1)
for line in candidate:
commands += " " + line + "\n"
commands += multiline_delimiter + "\n"
obj = {"command": commands, "sendonly": True}
results.append(self.send_command(**obj))
requests.append(commands)
time.sleep(0.1)
self.send_command("end", sendonly=True)
time.sleep(0.1)
results.append(self.send_command("\n"))
requests.append("\n")
resp["request"] = requests
resp["response"] = results
return resp
def get(
self,
command=None,
prompt=None,
answer=None,
sendonly=False,
output=None,
newline=True,
check_all=False,
):
if not command:
raise ValueError("must provide value of command to execute")
if output:
raise ValueError(
"'output' value %s is not supported for get" % output
)
return self.send_command(
command=command,
prompt=prompt,
answer=answer,
sendonly=sendonly,
newline=newline,
check_all=check_all,
)
def get_device_info(self):
device_info = {}
device_info["network_os"] = "ios"
reply = self.get(command="show version")
data = to_text(reply, errors="surrogate_or_strict").strip()
match = re.search(r"Version (\S+)", data)
if match:
device_info["network_os_version"] = match.group(1).strip(",")
model_search_strs = [
r"^[Cc]isco (.+) \(revision",
r"^[Cc]isco (\S+).+bytes of .*memory",
]
for item in model_search_strs:
match = re.search(item, data, re.M)
if match:
version = match.group(1).split(" ")
device_info["network_os_model"] = version[0]
break
match = re.search(r"^(.+) uptime", data, re.M)
if match:
device_info["network_os_hostname"] = match.group(1)
match = re.search(r'image file is "(.+)"', data)
if match:
device_info["network_os_image"] = match.group(1)
return device_info
def get_device_operations(self):
return {
"supports_diff_replace": True,
"supports_commit": False,
"supports_rollback": False,
"supports_defaults": True,
"supports_onbox_diff": False,
"supports_commit_comment": False,
"supports_multiline_delimiter": True,
"supports_diff_match": True,
"supports_diff_ignore_lines": True,
"supports_generate_diff": True,
"supports_replace": False,
}
def get_option_values(self):
return {
"format": ["text"],
"diff_match": ["line", "strict", "exact", "none"],
"diff_replace": ["line", "block"],
"output": [],
}
def get_capabilities(self):
result = super(Cliconf, self).get_capabilities()
result["rpc"] += [
"edit_banner",
"get_diff",
"run_commands",
"get_defaults_flag",
]
result["device_operations"] = self.get_device_operations()
result.update(self.get_option_values())
return json.dumps(result)
def edit_banner(
self, candidate=None, multiline_delimiter="@", commit=True
):
"""
Edit banner on remote device
:param candidate: Banners to be loaded in json format
:param multiline_delimiter: Line delimiter for banner
:param commit: Boolean value that indicates if the device candidate
configuration should be pushed in the running configuration or discarded.
:param diff: Boolean flag to indicate if the configuration applied on the remote host should
be generated and returned in the response or not
:return: Returns response of executing the configuration command received
from remote host
"""
resp = {}
banners_obj = json.loads(candidate)
results = []
requests = []
if commit:
for key, value in iteritems(banners_obj):
key += " %s" % multiline_delimiter
self.send_command("config terminal", sendonly=True)
for cmd in [key, value, multiline_delimiter]:
obj = {"command": cmd, "sendonly": True}
results.append(self.send_command(**obj))
requests.append(cmd)
self.send_command("end", sendonly=True)
time.sleep(0.1)
results.append(self.send_command("\n"))
requests.append("\n")
resp["request"] = requests
resp["response"] = results
return resp
def run_commands(self, commands=None, check_rc=True):
if commands is None:
raise ValueError("'commands' value is required")
responses = list()
for cmd in to_list(commands):
if not isinstance(cmd, Mapping):
cmd = {"command": cmd}
output = cmd.pop("output", None)
if output:
raise ValueError(
"'output' value %s is not supported for run_commands"
% output
)
try:
out = self.send_command(**cmd)
except AnsibleConnectionFailure as e:
if check_rc:
raise
out = getattr(e, "err", to_text(e))
responses.append(out)
return responses
def get_defaults_flag(self):
"""
The method identifies the filter that should be used to fetch running-configuration
with defaults.
:return: valid default filter
"""
out = self.get("show running-config ?")
out = to_text(out, errors="surrogate_then_replace")
commands = set()
for line in out.splitlines():
if line.strip():
commands.add(line.strip().split()[0])
if "all" in commands:
return "all"
else:
return "full"
def set_cli_prompt_context(self):
"""
Make sure we are in the operational cli mode
:return: None
"""
if self._connection.connected:
out = self._connection.get_prompt()
if out is None:
raise AnsibleConnectionFailure(
message=u"cli prompt is not identified from the last received"
u" response window: %s"
% self._connection._last_recv_window
)
if re.search(
r"config.*\)#",
to_text(out, errors="surrogate_then_replace").strip(),
):
self._connection.queue_message(
"vvvv", "wrong context, sending end to device"
)
self._connection.send_command("end")
def _extract_banners(self, config):
banners = {}
banner_cmds = re.findall(r"^banner (\w+)", config, re.M)
for cmd in banner_cmds:
regex = r"banner %s \^C(.+?)(?=\^C)" % cmd
match = re.search(regex, config, re.S)
if match:
key = "banner %s" % cmd
banners[key] = match.group(1).strip()
for cmd in banner_cmds:
regex = r"banner %s \^C(.+?)(?=\^C)" % cmd
match = re.search(regex, config, re.S)
if match:
config = config.replace(str(match.group(1)), "")
config = re.sub(r"banner \w+ \^C\^C", "!! banner removed", config)
return config, banners
def _diff_banners(self, want, have):
candidate = {}
for key, value in iteritems(want):
if value != have.get(key):
candidate[key] = value
return candidate
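The get_diff() method above delegates the line-by-line comparison to NetworkConfig from ansible.netcommon. Below is a minimal offline sketch of that technique, assuming the ansible.netcommon collection is installed; the interface names and lines are illustrative only.

from ansible_collections.ansible.netcommon.plugins.module_utils.network.common.config import (
    NetworkConfig,
    dumps,
)

# candidate: what we want on the device
candidate = NetworkConfig(indent=1)
candidate.load("interface GigabitEthernet0/1\n description uplink\n")

# running: what the device currently has
running = NetworkConfig(indent=1, contents="interface GigabitEthernet0/1\n")

# difference() keeps only the candidate objects missing from running;
# dumps(..., "commands") renders them as pushable CLI lines.
changed = candidate.difference(running, match="line", replace="line")
print(dumps(changed, "commands") if changed else "")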

@ -0,0 +1,81 @@
# -*- coding: utf-8 -*-
# Copyright: (c) 2015, Peter Sprygada <psprygada@ansible.com>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
class ModuleDocFragment(object):
# Standard files documentation fragment
DOCUMENTATION = r"""options:
provider:
description:
- B(Deprecated)
- 'Starting with Ansible 2.5 we recommend using C(connection: network_cli).'
- For more information please see the L(IOS Platform Options guide, ../network/user_guide/platform_ios.html).
- HORIZONTALLINE
- A dict object containing connection details.
type: dict
suboptions:
host:
description:
- Specifies the DNS host name or address for connecting to the remote device
over the specified transport. The value of host is used as the destination
address for the transport.
type: str
required: true
port:
description:
- Specifies the port to use when building the connection to the remote device.
type: int
default: 22
username:
description:
- Configures the username to use to authenticate the connection to the remote
device. This value is used to authenticate the SSH session. If the value
is not specified in the task, the value of environment variable C(ANSIBLE_NET_USERNAME)
will be used instead.
type: str
password:
description:
- Specifies the password to use to authenticate the connection to the remote
device. This value is used to authenticate the SSH session. If the value
is not specified in the task, the value of environment variable C(ANSIBLE_NET_PASSWORD)
will be used instead.
type: str
timeout:
description:
- Specifies the timeout in seconds for communicating with the network device
for either connecting or sending commands. If the timeout is exceeded before
the operation is completed, the module will error.
type: int
default: 10
ssh_keyfile:
description:
- Specifies the SSH key to use to authenticate the connection to the remote
device. This value is the path to the key used to authenticate the SSH
session. If the value is not specified in the task, the value of environment
variable C(ANSIBLE_NET_SSH_KEYFILE) will be used instead.
type: path
authorize:
description:
- Instructs the module to enter privileged mode on the remote device before
sending any commands. If not specified, the device will attempt to execute
all commands in non-privileged mode. If the value is not specified in the
task, the value of environment variable C(ANSIBLE_NET_AUTHORIZE) will be
used instead.
type: bool
default: false
auth_pass:
description:
- Specifies the password to use if required to enter privileged mode on the
remote device. If I(authorize) is false, then this argument does nothing.
If the value is not specified in the task, the value of environment variable
C(ANSIBLE_NET_AUTH_PASS) will be used instead.
type: str
notes:
- For more information on using Ansible to manage network devices see the :ref:`Ansible
Network Guide <network_guide>`
- For more information on using Ansible to manage Cisco devices see the `Cisco integration
page <https://www.ansible.com/integrations/networks/cisco>`_.
"""

@ -0,0 +1,197 @@
# This code is part of Ansible, but is an independent component.
# This particular file snippet, and this file snippet only, is BSD licensed.
# Modules you write using this snippet, which is embedded dynamically by Ansible
# still belong to the author of the module, and may assign their own license
# to the complete work.
#
# (c) 2016 Red Hat Inc.
#
# Redistribution and use in source and binary forms, with or without modification,
# are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
# IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
# USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
import json
from ansible.module_utils._text import to_text
from ansible.module_utils.basic import env_fallback
from ansible_collections.ansible.netcommon.plugins.module_utils.network.common.utils import (
to_list,
)
from ansible.module_utils.connection import Connection, ConnectionError
_DEVICE_CONFIGS = {}
ios_provider_spec = {
"host": dict(),
"port": dict(type="int"),
"username": dict(fallback=(env_fallback, ["ANSIBLE_NET_USERNAME"])),
"password": dict(
fallback=(env_fallback, ["ANSIBLE_NET_PASSWORD"]), no_log=True
),
"ssh_keyfile": dict(
fallback=(env_fallback, ["ANSIBLE_NET_SSH_KEYFILE"]), type="path"
),
"authorize": dict(
fallback=(env_fallback, ["ANSIBLE_NET_AUTHORIZE"]), type="bool"
),
"auth_pass": dict(
fallback=(env_fallback, ["ANSIBLE_NET_AUTH_PASS"]), no_log=True
),
"timeout": dict(type="int"),
}
ios_argument_spec = {
"provider": dict(
type="dict", options=ios_provider_spec, removed_in_version=2.14
)
}
def get_provider_argspec():
return ios_provider_spec
def get_connection(module):
if hasattr(module, "_ios_connection"):
return module._ios_connection
capabilities = get_capabilities(module)
network_api = capabilities.get("network_api")
if network_api == "cliconf":
module._ios_connection = Connection(module._socket_path)
else:
module.fail_json(msg="Invalid connection type %s" % network_api)
return module._ios_connection
def get_capabilities(module):
if hasattr(module, "_ios_capabilities"):
return module._ios_capabilities
try:
capabilities = Connection(module._socket_path).get_capabilities()
except ConnectionError as exc:
module.fail_json(msg=to_text(exc, errors="surrogate_then_replace"))
module._ios_capabilities = json.loads(capabilities)
return module._ios_capabilities
def get_defaults_flag(module):
connection = get_connection(module)
try:
out = connection.get_defaults_flag()
except ConnectionError as exc:
module.fail_json(msg=to_text(exc, errors="surrogate_then_replace"))
return to_text(out, errors="surrogate_then_replace").strip()
def get_config(module, flags=None):
flags = to_list(flags)
section_filter = False
if flags and "section" in flags[-1]:
section_filter = True
flag_str = " ".join(flags)
try:
return _DEVICE_CONFIGS[flag_str]
except KeyError:
connection = get_connection(module)
try:
out = connection.get_config(flags=flags)
except ConnectionError as exc:
if section_filter:
# Some ios devices don't understand `| section foo`
out = get_config(module, flags=flags[:-1])
else:
module.fail_json(
msg=to_text(exc, errors="surrogate_then_replace")
)
cfg = to_text(out, errors="surrogate_then_replace").strip()
_DEVICE_CONFIGS[flag_str] = cfg
return cfg
def run_commands(module, commands, check_rc=True):
connection = get_connection(module)
try:
return connection.run_commands(commands=commands, check_rc=check_rc)
except ConnectionError as exc:
module.fail_json(msg=to_text(exc))
def load_config(module, commands):
connection = get_connection(module)
try:
resp = connection.edit_config(commands)
return resp.get("response")
except ConnectionError as exc:
module.fail_json(msg=to_text(exc))
def normalize_interface(name):
"""Return the normalized interface name
"""
if not name:
return
def _get_number(name):
digits = ""
for char in name:
if char.isdigit() or char in "/.":
digits += char
return digits
if name.lower().startswith("gi"):
if_type = "GigabitEthernet"
elif name.lower().startswith("te"):
if_type = "TenGigabitEthernet"
elif name.lower().startswith("fa"):
if_type = "FastEthernet"
elif name.lower().startswith("fo"):
if_type = "FortyGigabitEthernet"
elif name.lower().startswith("et"):
if_type = "Ethernet"
elif name.lower().startswith("vl"):
if_type = "Vlan"
elif name.lower().startswith("lo"):
if_type = "loopback"
elif name.lower().startswith("po"):
if_type = "port-channel"
elif name.lower().startswith("nv"):
if_type = "nve"
elif name.lower().startswith("twe"):
if_type = "TwentyFiveGigE"
elif name.lower().startswith("hu"):
if_type = "HundredGigE"
else:
if_type = None
number_list = name.split(" ")
if len(number_list) == 2:
if_number = number_list[-1].strip()
else:
if_number = _get_number(name)
if if_type:
proper_interface = if_type + if_number
else:
proper_interface = name
return proper_interface
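A quick illustration of normalize_interface() defined above; the import path is the one the modules in this change use, and the expected expansions follow the prefix table in the function.

from ansible_collections.cisco.ios.plugins.module_utils.network.ios.ios import (
    normalize_interface,
)

# abbreviated names are expanded to their canonical prefix plus the number part
assert normalize_interface("Gi0/1") == "GigabitEthernet0/1"
assert normalize_interface("te1/0/1") == "TenGigabitEthernet1/0/1"
assert normalize_interface("vlan 10") == "Vlan10"
# unknown prefixes are returned unchanged
assert normalize_interface("Serial0/0") == "Serial0/0"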

@ -0,0 +1,229 @@
#!/usr/bin/python
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
ANSIBLE_METADATA = {
"metadata_version": "1.1",
"status": ["preview"],
"supported_by": "network",
}
DOCUMENTATION = """module: ios_command
author: Peter Sprygada (@privateip)
short_description: Run commands on remote devices running Cisco IOS
description:
- Sends arbitrary commands to an ios node and returns the results read from the device.
This module includes an argument that will cause the module to wait for a specific
condition before returning or timing out if the condition is not met.
- This module does not support running commands in configuration mode. Please use
M(ios_config) to configure IOS devices.
extends_documentation_fragment:
- cisco.ios.ios
notes:
- Tested against IOS 15.6
options:
commands:
description:
- List of commands to send to the remote ios device over the configured provider.
The resulting output from the command is returned. If the I(wait_for) argument
is provided, the module does not return until the condition is satisfied or
the number of retries has expired. If a command sent to the device requires
answering a prompt, it is possible to pass a dict containing I(command), I(answer)
and I(prompt). Common answers are 'y' or "\r" (carriage return, must be double
quotes). See examples.
required: true
wait_for:
description:
- List of conditions to evaluate against the output of the command. The task will
wait for each condition to be true before moving forward. If the conditional
is not true within the configured number of retries, the task fails. See examples.
aliases:
- waitfor
match:
description:
- The I(match) argument is used in conjunction with the I(wait_for) argument to
specify the match policy. Valid values are C(all) or C(any). If the value
is set to C(all) then all conditionals in the wait_for must be satisfied. If
the value is set to C(any) then only one of the values must be satisfied.
default: all
choices:
- any
- all
retries:
description:
- Specifies the number of retries a command should be tried before it is considered
failed. The command is run on the target device every retry and evaluated against
the I(wait_for) conditions.
default: 10
interval:
description:
- Configures the interval in seconds to wait between retries of the command. If
the command does not pass the specified conditions, the interval indicates how
long to wait before trying the command again.
default: 1
"""
EXAMPLES = r"""
tasks:
- name: run show version on remote devices
ios_command:
commands: show version
- name: run show version and check to see if output contains IOS
ios_command:
commands: show version
wait_for: result[0] contains IOS
- name: run multiple commands on remote nodes
ios_command:
commands:
- show version
- show interfaces
- name: run multiple commands and evaluate the output
ios_command:
commands:
- show version
- show interfaces
wait_for:
- result[0] contains IOS
- result[1] contains Loopback0
- name: run commands that require answering a prompt
ios_command:
commands:
- command: 'clear counters GigabitEthernet0/1'
prompt: 'Clear "show interface" counters on this interface \[confirm\]'
answer: 'y'
- command: 'clear counters GigabitEthernet0/2'
prompt: '[confirm]'
answer: "\r"
"""
RETURN = """
stdout:
description: The set of responses from the commands
returned: always, apart from low-level errors (such as action plugin errors)
type: list
sample: ['...', '...']
stdout_lines:
description: The value of stdout split into a list
returned: always, apart from low-level errors (such as action plugin errors)
type: list
sample: [['...', '...'], ['...'], ['...']]
failed_conditions:
description: The list of conditionals that have failed
returned: failed
type: list
sample: ['...', '...']
"""
import time
from ansible.module_utils._text import to_text
from ansible.module_utils.basic import AnsibleModule
from ansible_collections.ansible.netcommon.plugins.module_utils.network.common.parsing import (
Conditional,
)
from ansible_collections.ansible.netcommon.plugins.module_utils.network.common.utils import (
transform_commands,
to_lines,
)
from ansible_collections.cisco.ios.plugins.module_utils.network.ios.ios import (
run_commands,
)
from ansible_collections.cisco.ios.plugins.module_utils.network.ios.ios import (
ios_argument_spec,
)
def parse_commands(module, warnings):
commands = transform_commands(module)
if module.check_mode:
for item in list(commands):
if not item["command"].startswith("show"):
warnings.append(
"Only show commands are supported when using check mode, not "
"executing %s" % item["command"]
)
commands.remove(item)
return commands
def main():
"""main entry point for module execution
"""
argument_spec = dict(
commands=dict(type="list", required=True),
wait_for=dict(type="list", aliases=["waitfor"]),
match=dict(default="all", choices=["all", "any"]),
retries=dict(default=10, type="int"),
interval=dict(default=1, type="int"),
)
argument_spec.update(ios_argument_spec)
module = AnsibleModule(
argument_spec=argument_spec, supports_check_mode=True
)
warnings = list()
result = {"changed": False, "warnings": warnings}
commands = parse_commands(module, warnings)
wait_for = module.params["wait_for"] or list()
try:
conditionals = [Conditional(c) for c in wait_for]
except AttributeError as exc:
module.fail_json(msg=to_text(exc))
retries = module.params["retries"]
interval = module.params["interval"]
match = module.params["match"]
while retries > 0:
responses = run_commands(module, commands)
for item in list(conditionals):
if item(responses):
if match == "any":
conditionals = list()
break
conditionals.remove(item)
if not conditionals:
break
time.sleep(interval)
retries -= 1
if conditionals:
failed_conditions = [item.raw for item in conditionals]
msg = "One or more conditional statements have not been satisfied"
module.fail_json(msg=msg, failed_conditions=failed_conditions)
result.update(
{"stdout": responses, "stdout_lines": list(to_lines(responses))}
)
module.exit_json(**result)
if __name__ == "__main__":
main()
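The wait_for handling in main() above builds Conditional objects and re-runs the commands until each one is satisfied or retries run out. A small sketch of that evaluation step, with made-up device output:

from ansible_collections.ansible.netcommon.plugins.module_utils.network.common.parsing import (
    Conditional,
)

responses = ["Cisco IOS Software, Version 15.6(2)T"]  # sample command output
conditionals = [Conditional("result[0] contains IOS")]

# each Conditional is callable against the list of command responses
unmet = [c for c in conditionals if not c(responses)]
print("satisfied" if not unmet else "failed: %s" % [c.raw for c in unmet])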

@ -0,0 +1,596 @@
#!/usr/bin/python
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
ANSIBLE_METADATA = {
"metadata_version": "1.1",
"status": ["preview"],
"supported_by": "network",
}
DOCUMENTATION = """module: ios_config
author: Peter Sprygada (@privateip)
short_description: Manage Cisco IOS configuration sections
description:
- Cisco IOS configurations use a simple block indent file syntax for segmenting configuration
into sections. This module provides an implementation for working with IOS configuration
sections in a deterministic way.
extends_documentation_fragment:
- cisco.ios.ios
notes:
- Tested against IOS 15.6
- Abbreviated commands are NOT idempotent, see L(Network FAQ,../network/user_guide/faq.html#why-do-the-config-modules-always-return-changed-true-with-abbreviated-commands).
options:
lines:
description:
- The ordered set of commands that should be configured in the section. The commands
must be the exact same commands as found in the device running-config. Be sure
to note the configuration command syntax as some commands are automatically
modified by the device config parser.
aliases:
- commands
parents:
description:
- The ordered set of parents that uniquely identify the section or hierarchy the
commands should be checked against. If the parents argument is omitted, the
commands are checked against the set of top level or global commands.
src:
description:
- Specifies the source path to the file that contains the configuration or configuration
template to load. The path to the source file can either be the full path on
the Ansible control host or a relative path from the playbook or role root directory. This
argument is mutually exclusive with I(lines), I(parents).
before:
description:
- The ordered set of commands to push on to the command stack if a change needs
to be made. This allows the playbook designer the opportunity to perform configuration
commands prior to pushing any changes without affecting how the set of commands
are matched against the system.
after:
description:
- The ordered set of commands to append to the end of the command stack if a change
needs to be made. Just like with I(before) this allows the playbook designer
to append a set of commands to be executed after the command set.
match:
description:
- Instructs the module on the way to perform the matching of the set of commands
against the current device config. If match is set to I(line), commands are
matched line by line. If match is set to I(strict), command lines are matched
with respect to position. If match is set to I(exact), command lines must be
an equal match. Finally, if match is set to I(none), the module will not attempt
to compare the source configuration with the running configuration on the remote
device.
choices:
- line
- strict
- exact
- none
default: line
replace:
description:
- Instructs the module on the way to perform the configuration on the device.
If the replace argument is set to I(line) then the modified lines are pushed
to the device in configuration mode. If the replace argument is set to I(block)
then the entire command block is pushed to the device in configuration mode
if any line is not correct.
default: line
choices:
- line
- block
multiline_delimiter:
description:
- This argument is used when pushing a multiline configuration element to the
IOS device. It specifies the character to use as the delimiting character. This
only applies to the configuration action.
default: '@'
backup:
description:
- This argument will cause the module to create a full backup of the current C(running-config)
from the remote device before any changes are made. If the C(backup_options)
value is not given, the backup file is written to the C(backup) folder in the
playbook root directory or role root directory, if the playbook is part of an Ansible
role. If the directory does not exist, it is created.
type: bool
default: 'no'
running_config:
description:
- The module, by default, will connect to the remote device and retrieve the current
running-config to use as a base for comparing against the contents of source.
There are times when it is not desirable to have the task get the current running-config
for every task in a playbook. The I(running_config) argument allows the implementer
to pass in the configuration to use as the base config for comparison.
aliases:
- config
defaults:
description:
- This argument specifies whether or not to collect all defaults when getting
the remote device running config. When enabled, the module will get the current
config by issuing the command C(show running-config all).
type: bool
default: 'no'
save_when:
description:
- When changes are made to the device running-configuration, the changes are not
copied to non-volatile storage by default. Using this argument will change
that behavior. If the argument is set to I(always), then the running-config will
always be copied to the startup-config and the I(modified) flag will always
be set to True. If the argument is set to I(modified), then the running-config
will only be copied to the startup-config if it has changed since the last save
to startup-config. If the argument is set to I(never), the running-config will
never be copied to the startup-config. If the argument is set to I(changed),
then the running-config will only be copied to the startup-config if the task
has made a change. I(changed) was added in Ansible 2.5.
default: never
choices:
- always
- never
- modified
- changed
diff_against:
description:
- When using the C(ansible-playbook --diff) command line argument the module can
generate diffs against different sources.
- When this option is configured as I(startup), the module will return the diff
of the running-config against the startup-config.
- When this option is configured as I(intended), the module will return the diff
of the running-config against the configuration provided in the C(intended_config)
argument.
- When this option is configured as I(running), the module will return the before
and after diff of the running-config with respect to any changes made to the
device configuration.
choices:
- running
- startup
- intended
diff_ignore_lines:
description:
- Use this argument to specify one or more lines that should be ignored during
the diff. This is used for lines in the configuration that are automatically
updated by the system. This argument takes a list of regular expressions or
exact line matches.
intended_config:
description:
- The C(intended_config) provides the master configuration that the node should
conform to and is used to check the final running-config against. This argument
will not modify any settings on the remote device and is strictly used to check
the compliance of the current device's configuration against. When specifying
this argument, the task should also modify the C(diff_against) value and set
it to I(intended).
backup_options:
description:
- This is a dict object containing configurable options related to backup file
path. The value of this option is read only when C(backup) is set to I(yes),
if C(backup) is set to I(no) this option will be silently ignored.
suboptions:
filename:
description:
- The filename to be used to store the backup configuration. If the filename
is not given it will be generated based on the hostname, current time and
date, in the format <hostname>_config.<current-date>@<current-time>
dir_path:
description:
- This option provides the path, ending with the directory name, in which the backup
configuration file will be stored. If the directory does not exist it will
be created first, and the filename is either the value of C(filename) or the
default filename described in the C(filename) option. If no path value is
given, a I(backup) directory is created in the current working directory and
the backup configuration is written to C(filename) within that I(backup)
directory.
type: path
type: dict
"""
EXAMPLES = """
- name: configure top level configuration
ios_config:
lines: hostname {{ inventory_hostname }}
- name: configure interface settings
ios_config:
lines:
- description test interface
- ip address 172.31.1.1 255.255.255.0
parents: interface Ethernet1
- name: configure ip helpers on multiple interfaces
ios_config:
lines:
- ip helper-address 172.26.1.10
- ip helper-address 172.26.3.8
parents: "{{ item }}"
with_items:
- interface Ethernet1
- interface Ethernet2
- interface GigabitEthernet1
- name: configure policer in Scavenger class
ios_config:
lines:
- conform-action transmit
- exceed-action drop
parents:
- policy-map Foo
- class Scavenger
- police cir 64000
- name: load new acl into device
ios_config:
lines:
- 10 permit ip host 192.0.2.1 any log
- 20 permit ip host 192.0.2.2 any log
- 30 permit ip host 192.0.2.3 any log
- 40 permit ip host 192.0.2.4 any log
- 50 permit ip host 192.0.2.5 any log
parents: ip access-list extended test
before: no ip access-list extended test
match: exact
- name: check the running-config against master config
ios_config:
diff_against: intended
intended_config: "{{ lookup('file', 'master.cfg') }}"
- name: check the startup-config against the running-config
ios_config:
diff_against: startup
diff_ignore_lines:
- ntp clock .*
- name: save running to startup when modified
ios_config:
save_when: modified
- name: for idempotency, use full-form commands
ios_config:
lines:
# - shut
- shutdown
# parents: int gig1/0/11
parents: interface GigabitEthernet1/0/11
# Set boot image based on comparison to a group_var (version) and the version
# that is returned from the `ios_facts` module
- name: SETTING BOOT IMAGE
ios_config:
lines:
- no boot system
- boot system flash bootflash:{{new_image}}
host: "{{ inventory_hostname }}"
when: ansible_net_version != version
- name: render a Jinja2 template onto an IOS device
ios_config:
backup: yes
src: ios_template.j2
- name: configurable backup path
ios_config:
src: ios_template.j2
backup: yes
backup_options:
filename: backup.cfg
dir_path: /home/user
"""
RETURN = """
updates:
description: The set of commands that will be pushed to the remote device
returned: always
type: list
sample: ['hostname foo', 'router ospf 1', 'router-id 192.0.2.1']
commands:
description: The set of commands that will be pushed to the remote device
returned: always
type: list
sample: ['hostname foo', 'router ospf 1', 'router-id 192.0.2.1']
backup_path:
description: The full path to the backup file
returned: when backup is yes
type: str
sample: /playbooks/ansible/backup/ios_config.2016-07-16@22:28:34
filename:
description: The name of the backup file
returned: when backup is yes and filename is not specified in backup options
type: str
sample: ios_config.2016-07-16@22:28:34
shortname:
description: The full path to the backup file excluding the timestamp
returned: when backup is yes and filename is not specified in backup options
type: str
sample: /playbooks/ansible/backup/ios_config
date:
description: The date extracted from the backup file name
returned: when backup is yes
type: str
sample: "2016-07-16"
time:
description: The time extracted from the backup file name
returned: when backup is yes
type: str
sample: "22:28:34"
"""
import json
from ansible.module_utils._text import to_text
from ansible.module_utils.connection import ConnectionError
from ansible_collections.cisco.ios.plugins.module_utils.network.ios.ios import (
run_commands,
get_config,
)
from ansible_collections.cisco.ios.plugins.module_utils.network.ios.ios import (
get_defaults_flag,
get_connection,
)
from ansible_collections.cisco.ios.plugins.module_utils.network.ios.ios import (
ios_argument_spec,
)
from ansible.module_utils.basic import AnsibleModule
from ansible_collections.ansible.netcommon.plugins.module_utils.network.common.config import (
NetworkConfig,
dumps,
)
def check_args(module, warnings):
if module.params["multiline_delimiter"]:
if len(module.params["multiline_delimiter"]) != 1:
module.fail_json(
msg="multiline_delimiter value can only be a "
"single character"
)
def edit_config_or_macro(connection, commands):
# only catch the macro configuration command,
# not negated 'no' variation.
if commands[0].startswith("macro name"):
connection.edit_macro(candidate=commands)
else:
connection.edit_config(candidate=commands)
def get_candidate_config(module):
candidate = ""
if module.params["src"]:
candidate = module.params["src"]
elif module.params["lines"]:
candidate_obj = NetworkConfig(indent=1)
parents = module.params["parents"] or list()
candidate_obj.add(module.params["lines"], parents=parents)
candidate = dumps(candidate_obj, "raw")
return candidate
def get_running_config(module, current_config=None, flags=None):
running = module.params["running_config"]
if not running:
if not module.params["defaults"] and current_config:
running = current_config
else:
running = get_config(module, flags=flags)
return running
def save_config(module, result):
result["changed"] = True
if not module.check_mode:
run_commands(module, "copy running-config startup-config\r")
else:
module.warn(
"Skipping command `copy running-config startup-config` "
"due to check_mode. Configuration not copied to "
"non-volatile storage"
)
def main():
""" main entry point for module execution
"""
backup_spec = dict(filename=dict(), dir_path=dict(type="path"))
argument_spec = dict(
src=dict(type="path"),
lines=dict(aliases=["commands"], type="list"),
parents=dict(type="list"),
before=dict(type="list"),
after=dict(type="list"),
match=dict(
default="line", choices=["line", "strict", "exact", "none"]
),
replace=dict(default="line", choices=["line", "block"]),
multiline_delimiter=dict(default="@"),
running_config=dict(aliases=["config"]),
intended_config=dict(),
defaults=dict(type="bool", default=False),
backup=dict(type="bool", default=False),
backup_options=dict(type="dict", options=backup_spec),
save_when=dict(
choices=["always", "never", "modified", "changed"], default="never"
),
diff_against=dict(choices=["startup", "intended", "running"]),
diff_ignore_lines=dict(type="list"),
)
argument_spec.update(ios_argument_spec)
mutually_exclusive = [("lines", "src"), ("parents", "src")]
required_if = [
("match", "strict", ["lines"]),
("match", "exact", ["lines"]),
("replace", "block", ["lines"]),
("diff_against", "intended", ["intended_config"]),
]
module = AnsibleModule(
argument_spec=argument_spec,
mutually_exclusive=mutually_exclusive,
required_if=required_if,
supports_check_mode=True,
)
result = {"changed": False}
warnings = list()
check_args(module, warnings)
result["warnings"] = warnings
diff_ignore_lines = module.params["diff_ignore_lines"]
config = None
contents = None
flags = get_defaults_flag(module) if module.params["defaults"] else []
connection = get_connection(module)
if module.params["backup"] or (
module._diff and module.params["diff_against"] == "running"
):
contents = get_config(module, flags=flags)
config = NetworkConfig(indent=1, contents=contents)
if module.params["backup"]:
result["__backup__"] = contents
if any((module.params["lines"], module.params["src"])):
match = module.params["match"]
replace = module.params["replace"]
path = module.params["parents"]
candidate = get_candidate_config(module)
running = get_running_config(module, contents, flags=flags)
try:
response = connection.get_diff(
candidate=candidate,
running=running,
diff_match=match,
diff_ignore_lines=diff_ignore_lines,
path=path,
diff_replace=replace,
)
except ConnectionError as exc:
module.fail_json(msg=to_text(exc, errors="surrogate_then_replace"))
config_diff = response["config_diff"]
banner_diff = response["banner_diff"]
if config_diff or banner_diff:
commands = config_diff.split("\n")
if module.params["before"]:
commands[:0] = module.params["before"]
if module.params["after"]:
commands.extend(module.params["after"])
result["commands"] = commands
result["updates"] = commands
result["banners"] = banner_diff
# send the configuration commands to the device and merge
# them with the current running config
if not module.check_mode:
if commands:
edit_config_or_macro(connection, commands)
if banner_diff:
connection.edit_banner(
candidate=json.dumps(banner_diff),
multiline_delimiter=module.params[
"multiline_delimiter"
],
)
result["changed"] = True
running_config = module.params["running_config"]
startup_config = None
if module.params["save_when"] == "always":
save_config(module, result)
elif module.params["save_when"] == "modified":
output = run_commands(
module, ["show running-config", "show startup-config"]
)
running_config = NetworkConfig(
indent=1, contents=output[0], ignore_lines=diff_ignore_lines
)
startup_config = NetworkConfig(
indent=1, contents=output[1], ignore_lines=diff_ignore_lines
)
if running_config.sha1 != startup_config.sha1:
save_config(module, result)
elif module.params["save_when"] == "changed" and result["changed"]:
save_config(module, result)
if module._diff:
if not running_config:
output = run_commands(module, "show running-config")
contents = output[0]
else:
contents = running_config
# recreate the object in order to process diff_ignore_lines
running_config = NetworkConfig(
indent=1, contents=contents, ignore_lines=diff_ignore_lines
)
if module.params["diff_against"] == "running":
if module.check_mode:
module.warn(
"unable to perform diff against running-config due to check mode"
)
contents = None
else:
contents = config.config_text
elif module.params["diff_against"] == "startup":
if not startup_config:
output = run_commands(module, "show startup-config")
contents = output[0]
else:
contents = startup_config.config_text
elif module.params["diff_against"] == "intended":
contents = module.params["intended_config"]
if contents is not None:
base_config = NetworkConfig(
indent=1, contents=contents, ignore_lines=diff_ignore_lines
)
if running_config.sha1 != base_config.sha1:
if module.params["diff_against"] == "intended":
before = running_config
after = base_config
elif module.params["diff_against"] in ("startup", "running"):
before = base_config
after = running_config
result.update(
{
"changed": True,
"diff": {"before": str(before), "after": str(after)},
}
)
module.exit_json(**result)
if __name__ == "__main__":
main()
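get_candidate_config() above renders `lines` under `parents` with NetworkConfig before asking the cliconf plugin for a diff. A minimal sketch of that step, with illustrative values:

from ansible_collections.ansible.netcommon.plugins.module_utils.network.common.config import (
    NetworkConfig,
    dumps,
)

candidate_obj = NetworkConfig(indent=1)
candidate_obj.add(
    ["description test interface", "ip address 172.31.1.1 255.255.255.0"],
    parents=["interface Ethernet1"],
)

# the "raw" rendering produces the interface stanza with its child lines,
# which is what get_diff() then receives as the candidate
print(dumps(candidate_obj, "raw"))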

@ -0,0 +1,115 @@
#
# (c) 2016 Red Hat Inc.
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
from __future__ import absolute_import, division, print_function
__metaclass__ = type
import json
import re
from ansible.errors import AnsibleConnectionFailure
from ansible.module_utils._text import to_text, to_bytes
from ansible.plugins.terminal import TerminalBase
from ansible.utils.display import Display
display = Display()
class TerminalModule(TerminalBase):
terminal_stdout_re = [
re.compile(br"[\r\n]?[\w\+\-\.:\/\[\]]+(?:\([^\)]+\)){0,3}(?:[>#]) ?$")
]
terminal_stderr_re = [
re.compile(br"% ?Error"),
# re.compile(br"^% \w+", re.M),
re.compile(br"% ?Bad secret"),
re.compile(br"[\r\n%] Bad passwords"),
re.compile(br"invalid input", re.I),
re.compile(br"(?:incomplete|ambiguous) command", re.I),
re.compile(br"connection timed out", re.I),
re.compile(br"[^\r\n]+ not found"),
re.compile(br"'[^']' +returned error code: ?\d+"),
re.compile(br"Bad mask", re.I),
re.compile(br"% ?(\S+) ?overlaps with ?(\S+)", re.I),
re.compile(br"[%\S] ?Error: ?[\s]+", re.I),
re.compile(br"[%\S] ?Informational: ?[\s]+", re.I),
re.compile(br"Command authorization failed"),
]
def on_open_shell(self):
try:
self._exec_cli_command(b"terminal length 0")
except AnsibleConnectionFailure:
raise AnsibleConnectionFailure("unable to set terminal parameters")
try:
self._exec_cli_command(b"terminal width 512")
try:
self._exec_cli_command(b"terminal width 0")
except AnsibleConnectionFailure:
pass
except AnsibleConnectionFailure:
display.display(
"WARNING: Unable to set terminal width, command responses may be truncated"
)
def on_become(self, passwd=None):
if self._get_prompt().endswith(b"#"):
return
cmd = {u"command": u"enable"}
if passwd:
# Note: python-3.5 cannot combine u"" and r"" together. Thus make
# an r string and use to_text to ensure it's text on both py2 and py3.
cmd[u"prompt"] = to_text(
r"[\r\n]?(?:.*)?[Pp]assword: ?$", errors="surrogate_or_strict"
)
cmd[u"answer"] = passwd
cmd[u"prompt_retry_check"] = True
try:
self._exec_cli_command(
to_bytes(json.dumps(cmd), errors="surrogate_or_strict")
)
prompt = self._get_prompt()
if prompt is None or not prompt.endswith(b"#"):
raise AnsibleConnectionFailure(
"failed to elevate privilege to enable mode still at prompt [%s]"
% prompt
)
except AnsibleConnectionFailure as e:
prompt = self._get_prompt()
raise AnsibleConnectionFailure(
"unable to elevate privilege to enable mode, at prompt [%s] with error: %s"
% (prompt, e.message)
)
def on_unbecome(self):
prompt = self._get_prompt()
if prompt is None:
# if prompt is None most likely the terminal is hung up at a prompt
return
if b"(config" in prompt:
self._exec_cli_command(b"end")
self._exec_cli_command(b"disable")
elif prompt.endswith(b"#"):
self._exec_cli_command(b"disable")

@ -0,0 +1,129 @@
#
# (c) 2016 Red Hat Inc.
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
from __future__ import absolute_import, division, print_function
__metaclass__ = type
import sys
import copy
from ansible_collections.ansible.netcommon.plugins.action.network import (
ActionModule as ActionNetworkModule,
)
from ansible_collections.ansible.netcommon.plugins.module_utils.network.common.utils import (
load_provider,
)
from ansible_collections.vyos.vyos.plugins.module_utils.network.vyos.vyos import (
vyos_provider_spec,
)
from ansible.utils.display import Display
display = Display()
class ActionModule(ActionNetworkModule):
def run(self, tmp=None, task_vars=None):
del tmp # tmp no longer has any effect
module_name = self._task.action.split(".")[-1]
self._config_module = True if module_name == "vyos_config" else False
persistent_connection = self._play_context.connection.split(".")[-1]
warnings = []
if persistent_connection == "network_cli":
provider = self._task.args.get("provider", {})
if any(provider.values()):
display.warning(
"provider is unnecessary when using network_cli and will be ignored"
)
del self._task.args["provider"]
elif self._play_context.connection == "local":
provider = load_provider(vyos_provider_spec, self._task.args)
pc = copy.deepcopy(self._play_context)
pc.connection = "ansible.netcommon.network_cli"
pc.network_os = "vyos.vyos.vyos"
pc.remote_addr = provider["host"] or self._play_context.remote_addr
pc.port = int(provider["port"] or self._play_context.port or 22)
pc.remote_user = (
provider["username"] or self._play_context.connection_user
)
pc.password = provider["password"] or self._play_context.password
pc.private_key_file = (
provider["ssh_keyfile"] or self._play_context.private_key_file
)
connection = self._shared_loader_obj.connection_loader.get(
"ansible.netcommon.persistent",
pc,
sys.stdin,
task_uuid=self._task._uuid,
)
# TODO: Remove below code after ansible minimal is cut out
if connection is None:
pc.connection = "network_cli"
pc.network_os = "vyos"
connection = self._shared_loader_obj.connection_loader.get(
"persistent", pc, sys.stdin, task_uuid=self._task._uuid
)
display.vvv(
"using connection plugin %s (was local)" % pc.connection,
pc.remote_addr,
)
command_timeout = (
int(provider["timeout"])
if provider["timeout"]
else connection.get_option("persistent_command_timeout")
)
connection.set_options(
direct={"persistent_command_timeout": command_timeout}
)
socket_path = connection.run()
display.vvvv("socket_path: %s" % socket_path, pc.remote_addr)
if not socket_path:
return {
"failed": True,
"msg": "unable to open shell. Please see: "
+ "https://docs.ansible.com/ansible/network_debug_troubleshooting.html#unable-to-open-shell",
}
task_vars["ansible_socket"] = socket_path
warnings.append(
[
"connection local support for this module is deprecated and will be removed in version 2.14, use connection %s"
% pc.connection
]
)
else:
return {
"failed": True,
"msg": "Connection type %s is not valid for this module"
% self._play_context.connection,
}
result = super(ActionModule, self).run(task_vars=task_vars)
if warnings:
if "warnings" in result:
result["warnings"].extend(warnings)
else:
result["warnings"] = warnings
return result
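The action plugin above gives an explicit provider timeout precedence over the connection's persistent_command_timeout option. A trivial sketch of that rule; the helper name is hypothetical.

def resolve_command_timeout(provider_timeout, connection_default):
    # provider["timeout"], when set, overrides the connection option
    return int(provider_timeout) if provider_timeout else connection_default

assert resolve_command_timeout(None, 30) == 30
assert resolve_command_timeout("60", 30) == 60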

@ -0,0 +1,342 @@
#
# (c) 2017 Red Hat Inc.
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
from __future__ import absolute_import, division, print_function
__metaclass__ = type
DOCUMENTATION = """
---
author: Ansible Networking Team
cliconf: vyos
short_description: Use vyos cliconf to run commands on VyOS platform
description:
- This vyos plugin provides low-level abstraction APIs for
sending and receiving CLI commands from VyOS network devices.
version_added: "2.4"
"""
import re
import json
from ansible.errors import AnsibleConnectionFailure
from ansible.module_utils._text import to_text
from ansible.module_utils.common._collections_compat import Mapping
from ansible_collections.ansible.netcommon.plugins.module_utils.network.common.config import (
NetworkConfig,
)
from ansible_collections.ansible.netcommon.plugins.module_utils.network.common.utils import (
to_list,
)
from ansible.plugins.cliconf import CliconfBase
class Cliconf(CliconfBase):
def get_device_info(self):
device_info = {}
device_info["network_os"] = "vyos"
reply = self.get("show version")
data = to_text(reply, errors="surrogate_or_strict").strip()
match = re.search(r"Version:\s*(.*)", data)
if match:
device_info["network_os_version"] = match.group(1)
match = re.search(r"HW model:\s*(\S+)", data)
if match:
device_info["network_os_model"] = match.group(1)
reply = self.get("show host name")
device_info["network_os_hostname"] = to_text(
reply, errors="surrogate_or_strict"
).strip()
return device_info
def get_config(self, flags=None, format=None):
if format:
option_values = self.get_option_values()
if format not in option_values["format"]:
raise ValueError(
"'format' value %s is invalid. Valid values of format are %s"
% (format, ", ".join(option_values["format"]))
)
if not flags:
flags = []
if format == "text":
command = "show configuration"
else:
command = "show configuration commands"
command += " ".join(to_list(flags))
command = command.strip()
out = self.send_command(command)
return out
def edit_config(
self, candidate=None, commit=True, replace=None, comment=None
):
resp = {}
operations = self.get_device_operations()
self.check_edit_config_capability(
operations, candidate, commit, replace, comment
)
results = []
requests = []
self.send_command("configure")
for cmd in to_list(candidate):
if not isinstance(cmd, Mapping):
cmd = {"command": cmd}
results.append(self.send_command(**cmd))
requests.append(cmd["command"])
out = self.get("compare")
out = to_text(out, errors="surrogate_or_strict")
diff_config = out if not out.startswith("No changes") else None
if diff_config:
if commit:
try:
self.commit(comment)
except AnsibleConnectionFailure as e:
msg = "commit failed: %s" % e.message
self.discard_changes()
raise AnsibleConnectionFailure(msg)
else:
self.send_command("exit")
else:
self.discard_changes()
else:
self.send_command("exit")
if (
to_text(
self._connection.get_prompt(), errors="surrogate_or_strict"
)
.strip()
.endswith("#")
):
self.discard_changes()
if diff_config:
resp["diff"] = diff_config
resp["response"] = results
resp["request"] = requests
return resp
def get(
self,
command=None,
prompt=None,
answer=None,
sendonly=False,
output=None,
newline=True,
check_all=False,
):
if not command:
raise ValueError("must provide value of command to execute")
if output:
raise ValueError(
"'output' value %s is not supported for get" % output
)
return self.send_command(
command=command,
prompt=prompt,
answer=answer,
sendonly=sendonly,
newline=newline,
check_all=check_all,
)
def commit(self, comment=None):
if comment:
command = 'commit comment "{0}"'.format(comment)
else:
command = "commit"
self.send_command(command)
def discard_changes(self):
self.send_command("exit discard")
def get_diff(
self,
candidate=None,
running=None,
diff_match="line",
diff_ignore_lines=None,
path=None,
diff_replace=None,
):
diff = {}
device_operations = self.get_device_operations()
option_values = self.get_option_values()
if candidate is None and device_operations["supports_generate_diff"]:
raise ValueError(
"candidate configuration is required to generate diff"
)
if diff_match not in option_values["diff_match"]:
raise ValueError(
"'match' value %s in invalid, valid values are %s"
% (diff_match, ", ".join(option_values["diff_match"]))
)
if diff_replace:
raise ValueError("'replace' in diff is not supported")
if diff_ignore_lines:
raise ValueError("'diff_ignore_lines' in diff is not supported")
if path:
raise ValueError("'path' in diff is not supported")
set_format = candidate.startswith("set") or candidate.startswith(
"delete"
)
candidate_obj = NetworkConfig(indent=4, contents=candidate)
if not set_format:
config = [c.line for c in candidate_obj.items]
commands = list()
# this filters out less specific lines
for item in config:
for index, entry in enumerate(commands):
if item.startswith(entry):
del commands[index]
break
commands.append(item)
candidate_commands = [
"set %s" % cmd.replace(" {", "") for cmd in commands
]
else:
candidate_commands = str(candidate).strip().split("\n")
if diff_match == "none":
diff["config_diff"] = list(candidate_commands)
return diff
running_commands = [
str(c).replace("'", "") for c in running.splitlines()
]
updates = list()
visited = set()
for line in candidate_commands:
item = str(line).replace("'", "")
if not item.startswith("set") and not item.startswith("delete"):
raise ValueError(
"line must start with either `set` or `delete`"
)
elif item.startswith("set") and item not in running_commands:
updates.append(line)
elif item.startswith("delete"):
if not running_commands:
updates.append(line)
else:
item = re.sub(r"delete", "set", item)
for entry in running_commands:
if entry.startswith(item) and line not in visited:
updates.append(line)
visited.add(line)
diff["config_diff"] = list(updates)
return diff
def run_commands(self, commands=None, check_rc=True):
if commands is None:
raise ValueError("'commands' value is required")
responses = list()
for cmd in to_list(commands):
if not isinstance(cmd, Mapping):
cmd = {"command": cmd}
output = cmd.pop("output", None)
if output:
raise ValueError(
"'output' value %s is not supported for run_commands"
% output
)
try:
out = self.send_command(**cmd)
except AnsibleConnectionFailure as e:
if check_rc:
raise
out = getattr(e, "err", e)
responses.append(out)
return responses
def get_device_operations(self):
return {
"supports_diff_replace": False,
"supports_commit": True,
"supports_rollback": False,
"supports_defaults": False,
"supports_onbox_diff": True,
"supports_commit_comment": True,
"supports_multiline_delimiter": False,
"supports_diff_match": True,
"supports_diff_ignore_lines": False,
"supports_generate_diff": False,
"supports_replace": False,
}
def get_option_values(self):
return {
"format": ["text", "set"],
"diff_match": ["line", "none"],
"diff_replace": [],
"output": [],
}
def get_capabilities(self):
result = super(Cliconf, self).get_capabilities()
result["rpc"] += [
"commit",
"discard_changes",
"get_diff",
"run_commands",
]
result["device_operations"] = self.get_device_operations()
result.update(self.get_option_values())
return json.dumps(result)
def set_cli_prompt_context(self):
"""
Make sure we are in the operational cli mode
:return: None
"""
if self._connection.connected:
self._update_cli_prompt_context(
config_context="#", exit_command="exit discard"
)
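
Note (not part of this diff): these cliconf RPCs are normally driven through the persistent connection from module code rather than called directly. A minimal, illustrative sketch follows; the edit_config() keyword arguments and the "show configuration commands" command are assumptions based on how other vyos content uses this plugin.

import json

from ansible.module_utils.connection import Connection


def apply_candidate(module, candidate):
    # The Connection proxy forwards these calls to the cliconf plugin above
    # over the persistent network_cli connection; module._socket_path is set
    # for network modules.
    conn = Connection(module._socket_path)
    running = conn.get("show configuration commands")
    diff = conn.get_diff(candidate=candidate, running=running, diff_match="line")
    commands = diff["config_diff"]
    if commands and not module.check_mode:
        # edit_config() commits the change or discards it on failure, as the
        # plugin above shows; the keyword names here are an assumption.
        conn.edit_config(candidate=commands, commit=True, comment="ansible")
    capabilities = json.loads(conn.get_capabilities())
    return commands, capabilities["device_operations"]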

@ -0,0 +1,63 @@
# -*- coding: utf-8 -*-
# Copyright: (c) 2015, Peter Sprygada <psprygada@ansible.com>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
class ModuleDocFragment(object):
# Standard files documentation fragment
DOCUMENTATION = r"""options:
provider:
description:
- B(Deprecated)
- 'Starting with Ansible 2.5 we recommend using C(connection: network_cli).'
- For more information please see the L(Network Guide, ../network/getting_started/network_differences.html#multiple-communication-protocols).
- HORIZONTALLINE
- A dict object containing connection details.
type: dict
suboptions:
host:
description:
- Specifies the DNS host name or address for connecting to the remote device
over the specified transport. The value of host is used as the destination
address for the transport.
type: str
required: true
port:
description:
- Specifies the port to use when building the connection to the remote device.
type: int
default: 22
username:
description:
- Configures the username to use to authenticate the connection to the remote
device. This value is used to authenticate the SSH session. If the value
is not specified in the task, the value of environment variable C(ANSIBLE_NET_USERNAME)
will be used instead.
type: str
password:
description:
- Specifies the password to use to authenticate the connection to the remote
device. This value is used to authenticate the SSH session. If the value
is not specified in the task, the value of environment variable C(ANSIBLE_NET_PASSWORD)
will be used instead.
type: str
timeout:
description:
- Specifies the timeout in seconds for communicating with the network device
for either connecting or sending commands. If the timeout is exceeded before
the operation is completed, the module will error.
type: int
default: 10
ssh_keyfile:
description:
- Specifies the SSH key to use to authenticate the connection to the remote
device. This value is the path to the key used to authenticate the SSH
session. If the value is not specified in the task, the value of environment
variable C(ANSIBLE_NET_SSH_KEYFILE) will be used instead.
type: path
notes:
- For more information on using Ansible to manage network devices see the :ref:`Ansible
Network Guide <network_guide>`
"""

@ -0,0 +1,22 @@
# Copyright 2019 Red Hat
# GNU General Public License v3.0+
# (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
"""
The arg spec for the vyos facts module.
"""
from __future__ import absolute_import, division, print_function
__metaclass__ = type
class FactsArgs(object): # pylint: disable=R0903
""" The arg spec for the vyos facts module
"""
def __init__(self, **kwargs):
pass
argument_spec = {
"gather_subset": dict(default=["!config"], type="list"),
"gather_network_resources": dict(type="list"),
}
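
Note (not part of this diff): an arg spec class like this is consumed by handing its argument_spec to AnsibleModule. A rough sketch follows; the import of FactsArgs is omitted because its path depends on the collection layout, and the returned facts are placeholders.

# Illustrative consumer of FactsArgs.argument_spec; FactsArgs would be
# imported from the collection's module_utils (path varies by layout).
from ansible.module_utils.basic import AnsibleModule


def main():
    module = AnsibleModule(
        argument_spec=FactsArgs.argument_spec, supports_check_mode=True
    )
    # Defaults per the spec: gather_subset=["!config"],
    # gather_network_resources=None.
    module.exit_json(
        changed=False,
        gather_subset=module.params["gather_subset"],
        gather_network_resources=module.params["gather_network_resources"],
    )


if __name__ == "__main__":
    main()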

@ -0,0 +1,263 @@
#
# -*- coding: utf-8 -*-
# Copyright 2019 Red Hat
# GNU General Public License v3.0+
# (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
#############################################
# WARNING #
#############################################
#
# This file is auto generated by the resource
# module builder playbook.
#
# Do not edit this file manually.
#
# Changes to this file will be over written
# by the resource module builder.
#
# Changes should be made in the model used to
# generate this file or in the resource module
# builder template.
#
#############################################
"""
The arg spec for the vyos_firewall_rules module
"""
from __future__ import absolute_import, division, print_function
__metaclass__ = type
class Firewall_rulesArgs(object): # pylint: disable=R0903
"""The arg spec for the vyos_firewall_rules module
"""
def __init__(self, **kwargs):
pass
argument_spec = {
"config": {
"elements": "dict",
"options": {
"afi": {
"choices": ["ipv4", "ipv6"],
"required": True,
"type": "str",
},
"rule_sets": {
"elements": "dict",
"options": {
"default_action": {
"choices": ["drop", "reject", "accept"],
"type": "str",
},
"description": {"type": "str"},
"enable_default_log": {"type": "bool"},
"name": {"type": "str"},
"rules": {
"elements": "dict",
"options": {
"action": {
"choices": [
"drop",
"reject",
"accept",
"inspect",
],
"type": "str",
},
"description": {"type": "str"},
"destination": {
"options": {
"address": {"type": "str"},
"group": {
"options": {
"address_group": {
"type": "str"
},
"network_group": {
"type": "str"
},
"port_group": {"type": "str"},
},
"type": "dict",
},
"port": {"type": "str"},
},
"type": "dict",
},
"disabled": {"type": "bool"},
"fragment": {
"choices": [
"match-frag",
"match-non-frag",
],
"type": "str",
},
"icmp": {
"options": {
"code": {"type": "int"},
"type": {"type": "int"},
"type_name": {
"choices": [
"any",
"echo-reply",
"destination-unreachable",
"network-unreachable",
"host-unreachable",
"protocol-unreachable",
"port-unreachable",
"fragmentation-needed",
"source-route-failed",
"network-unknown",
"host-unknown",
"network-prohibited",
"host-prohibited",
"TOS-network-unreachable",
"TOS-host-unreachable",
"communication-prohibited",
"host-precedence-violation",
"precedence-cutoff",
"source-quench",
"redirect",
"network-redirect",
"host-redirect",
"TOS-network-redirect",
"TOS-host-redirect",
"echo-request",
"router-advertisement",
"router-solicitation",
"time-exceeded",
"ttl-zero-during-transit",
"ttl-zero-during-reassembly",
"parameter-problem",
"ip-header-bad",
"required-option-missing",
"timestamp-request",
"timestamp-reply",
"address-mask-request",
"address-mask-reply",
"ping",
"pong",
"ttl-exceeded",
],
"type": "str",
},
},
"type": "dict",
},
"ipsec": {
"choices": ["match-ipsec", "match-none"],
"type": "str",
},
"limit": {
"options": {
"burst": {"type": "int"},
"rate": {
"options": {
"number": {"type": "int"},
"unit": {"type": "str"},
},
"type": "dict",
},
},
"type": "dict",
},
"number": {"required": True, "type": "int"},
"p2p": {
"elements": "dict",
"options": {
"application": {
"choices": [
"all",
"applejuice",
"bittorrent",
"directconnect",
"edonkey",
"gnutella",
"kazaa",
],
"type": "str",
}
},
"type": "list",
},
"protocol": {"type": "str"},
"recent": {
"options": {
"count": {"type": "int"},
"time": {"type": "int"},
},
"type": "dict",
},
"source": {
"options": {
"address": {"type": "str"},
"group": {
"options": {
"address_group": {
"type": "str"
},
"network_group": {
"type": "str"
},
"port_group": {"type": "str"},
},
"type": "dict",
},
"mac_address": {"type": "str"},
"port": {"type": "str"},
},
"type": "dict",
},
"state": {
"options": {
"established": {"type": "bool"},
"invalid": {"type": "bool"},
"new": {"type": "bool"},
"related": {"type": "bool"},
},
"type": "dict",
},
"tcp": {
"options": {"flags": {"type": "str"}},
"type": "dict",
},
"time": {
"options": {
"monthdays": {"type": "str"},
"startdate": {"type": "str"},
"starttime": {"type": "str"},
"stopdate": {"type": "str"},
"stoptime": {"type": "str"},
"utc": {"type": "bool"},
"weekdays": {"type": "str"},
},
"type": "dict",
},
},
"type": "list",
},
},
"type": "list",
},
},
"type": "list",
},
"running_config": {"type": "str"},
"state": {
"choices": [
"merged",
"replaced",
"overridden",
"deleted",
"gathered",
"rendered",
"parsed",
],
"default": "merged",
"type": "str",
},
} # pylint: disable=C0301
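
Note (not part of this diff): task parameters in the following shape would pass the argument_spec above; all names, addresses and rule numbers are made up for illustration.

# Illustrative parameters accepted by Firewall_rulesArgs.argument_spec.
example_params = {
    "state": "merged",
    "config": [
        {
            "afi": "ipv4",
            "rule_sets": [
                {
                    "name": "INBOUND",
                    "default_action": "drop",
                    "enable_default_log": True,
                    "rules": [
                        {
                            "number": 101,
                            "action": "accept",
                            "protocol": "tcp",
                            "source": {"address": "192.0.2.0/24"},
                            "destination": {"port": "22"},
                        }
                    ],
                }
            ],
        }
    ],
}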

@ -0,0 +1,69 @@
# Copyright 2019 Red Hat
# GNU General Public License v3.0+
# (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
#############################################
# WARNING #
#############################################
#
# This file is auto generated by the resource
# module builder playbook.
#
# Do not edit this file manually.
#
# Changes to this file will be over written
# by the resource module builder.
#
# Changes should be made in the model used to
# generate this file or in the resource module
# builder template.
#
#############################################
"""
The arg spec for the vyos_interfaces module
"""
from __future__ import absolute_import, division, print_function
__metaclass__ = type
class InterfacesArgs(object): # pylint: disable=R0903
"""The arg spec for the vyos_interfaces module
"""
def __init__(self, **kwargs):
pass
argument_spec = {
"config": {
"elements": "dict",
"options": {
"description": {"type": "str"},
"duplex": {"choices": ["full", "half", "auto"]},
"enabled": {"default": True, "type": "bool"},
"mtu": {"type": "int"},
"name": {"required": True, "type": "str"},
"speed": {
"choices": ["auto", "10", "100", "1000", "2500", "10000"],
"type": "str",
},
"vifs": {
"elements": "dict",
"options": {
"vlan_id": {"type": "int"},
"description": {"type": "str"},
"enabled": {"default": True, "type": "bool"},
"mtu": {"type": "int"},
},
"type": "list",
},
},
"type": "list",
},
"state": {
"choices": ["merged", "replaced", "overridden", "deleted"],
"default": "merged",
"type": "str",
},
} # pylint: disable=C0301
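
Note (not part of this diff): for reference, parameters of the following shape would satisfy the interfaces spec above; the interface name and values are placeholders.

# Illustrative parameters accepted by InterfacesArgs.argument_spec.
example_params = {
    "state": "merged",
    "config": [
        {
            "name": "eth1",
            "description": "uplink",
            "enabled": True,
            "mtu": 1500,
            "speed": "auto",
            "duplex": "auto",
            "vifs": [
                {"vlan_id": 100, "description": "mgmt", "enabled": True}
            ],
        }
    ],
}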

@ -0,0 +1,81 @@
#
# -*- coding: utf-8 -*-
# Copyright 2019 Red Hat
# GNU General Public License v3.0+
# (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
#############################################
# WARNING #
#############################################
#
# This file is auto generated by the resource
# module builder playbook.
#
# Do not edit this file manually.
#
# Changes to this file will be over written
# by the resource module builder.
#
# Changes should be made in the model used to
# generate this file or in the resource module
# builder template.
#
#############################################
"""
The arg spec for the vyos_l3_interfaces module
"""
from __future__ import absolute_import, division, print_function
__metaclass__ = type
class L3_interfacesArgs(object): # pylint: disable=R0903
"""The arg spec for the vyos_l3_interfaces module
"""
def __init__(self, **kwargs):
pass
argument_spec = {
"config": {
"elements": "dict",
"options": {
"ipv4": {
"elements": "dict",
"options": {"address": {"type": "str"}},
"type": "list",
},
"ipv6": {
"elements": "dict",
"options": {"address": {"type": "str"}},
"type": "list",
},
"name": {"required": True, "type": "str"},
"vifs": {
"elements": "dict",
"options": {
"ipv4": {
"elements": "dict",
"options": {"address": {"type": "str"}},
"type": "list",
},
"ipv6": {
"elements": "dict",
"options": {"address": {"type": "str"}},
"type": "list",
},
"vlan_id": {"type": "int"},
},
"type": "list",
},
},
"type": "list",
},
"state": {
"choices": ["merged", "replaced", "overridden", "deleted"],
"default": "merged",
"type": "str",
},
} # pylint: disable=C0301
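
Note (not part of this diff): likewise, parameters of the following shape would satisfy the l3_interfaces spec above; the interface name and addresses are placeholders.

# Illustrative parameters accepted by L3_interfacesArgs.argument_spec.
example_params = {
    "state": "merged",
    "config": [
        {
            "name": "eth1",
            "ipv4": [{"address": "192.0.2.10/24"}],
            "ipv6": [{"address": "2001:db8::10/64"}],
            "vifs": [
                {"vlan_id": 100, "ipv4": [{"address": "198.51.100.1/24"}]}
            ],
        }
    ],
}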

Some files were not shown because too many files have changed in this diff.