tests/ansible/gcloud: terraform conf for load testing

pull/612/head
David Wilson 5 years ago
parent 108015aa22
commit de2e1ec184

@@ -0,0 +1,2 @@
terraform.tfstate*
.terraform

@@ -0,0 +1,3 @@
default:
	terraform fmt
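
The make target above only keeps the Terraform files canonically formatted. A typical session against this directory would look roughly like the sketch below (a hedged outline, not part of the commit):

    # fetch the google provider plugins once per checkout
    terraform init

    # reformat *.tf in place (the default make target)
    make

    # build the controller and network; node-count defaults to 0
    terraform apply

    # tear the whole environment down again
    terraform destroy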

@@ -1,19 +1,89 @@
- hosts: all
  become: true
  tasks:
    - apt: name={{item}} state=installed
      with_items:
        - openvpn
        - tcpdump
        - python-pip
        - python-virtualenv
        - strace
        - libldap2-dev
        - linux-perf
        - libsasl2-dev
        - build-essential
        - git
        - rsync
    - file:
        path: /etc/openvpn
        state: directory
    - copy:
        dest: /etc/openvpn/secret
        mode: '0600'
        content: |
          -----BEGIN OpenVPN Static key V1-----
          f94005e4206828e281eb397aefd69b37
          ebe6cd39057d5641c5d8dd539cd07651
          557d94d0077852bd8f92b68bef927169
          c5f0e42ac962a2cbbed35e107ffa0e71
          1a2607c6bcd919ec5846917b20eb6684
          c7505152815d6ed7b4420714777a3d4a
          8edb27ca81971cba7a1e88fe3936e13b
          85e9be6706a30cd1334836ed0f08e899
          78942329a330392dff42e4570731ac24
          9330358aaa6828c07ecb41fb9c498a89
          1e0435c5a45bfed390cd2104073634ef
          b00f9fae1d3c49ef5de51854103edac9
          5ff39c9dfc66ae270510b2ffa74d87d2
          9d4b3844b1e1473237bc6dc78fb03e2e
          643ce58e667a532efceec7177367fb37
          a16379a51e0a8c8e3ec00a59952b79d4
          -----END OpenVPN Static key V1-----
    - copy:
        dest: /etc/openvpn/k3.conf
        content: |
          remote k3.botanicus.net
          dev tun
          ifconfig 10.18.0.1 10.18.0.2
          secret secret
    - shell: systemctl enable openvpn@k3.service
    - shell: systemctl start openvpn@k3.service
    - lineinfile:
        line: "{{item}}"
        path: /etc/sysctl.conf
      register: sysctl_conf
      with_items:
        - "net.ipv4.ip_forward=1"
        - "kernel.perf_event_paranoid=-1"
    - shell: /sbin/sysctl -p
      when: sysctl_conf.changed
    - copy:
        dest: /etc/rc.local
        mode: "0744"
        content: |
          #!/bin/bash
          iptables -t nat -F;
          iptables -t nat -X;
          iptables -t nat -A POSTROUTING -j MASQUERADE;
    - shell: systemctl daemon-reload
    - shell: systemctl enable rc-local
    - shell: systemctl start rc-local
- hosts: all
  vars:
    git_username: '{{ lookup("pipe", "git config --global user.name") }}'
    git_email: '{{ lookup("pipe", "git config --global user.email") }}'
  tasks:
    - copy:
        src: ~/.ssh/id_gitlab
        dest: ~/.ssh/id_gitlab
@@ -23,38 +93,6 @@
        dest: ~/.ssh/config
        src: ssh_config.j2
    - lineinfile:
        line: "{{item}}"
        path: /etc/sysctl.conf
      become: true
      with_items:
        - net.ipv4.ip_forward=1
        - kernel.perf_event_paranoid=-1
      register: sysctl_conf
    - shell: /sbin/sysctl -p
      when: sysctl_conf.changed
      become: true
    - shell: |
        iptables -t nat -F;
        iptables -t nat -X;
        iptables -t nat -A POSTROUTING -j MASQUERADE;
      become: true
    - apt: name={{item}} state=installed
      become: true
      with_items:
        - python-pip
        - python-virtualenv
        - strace
        - libldap2-dev
        - linux-perf
        - libsasl2-dev
        - build-essential
        - git
        - rsync
    - shell: "rsync -a ~/.ssh {{inventory_hostname}}:"
      connection: local
@@ -119,4 +157,3 @@
        path: ~/prj/ansible/inventory/gcloud.py
        state: link
        src: ~/mitogen/tests/ansible/lib/inventory/gcloud.py
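
Taken together, the playbook above (which the Terraform configuration below runs against the controller's NAT address via ansible-playbook) turns that host into an OpenVPN endpoint and NAT gateway: it enables net.ipv4.ip_forward and installs a MASQUERADE rule from rc.local so that nodes without external IPs can reach the outside world through it. A rough manual check on the controller, assuming the tunnel device comes up as tun0, might be:

    # the static-key tunnel to k3.botanicus.net
    systemctl status openvpn@k3.service
    ip addr show dev tun0              # expect 10.18.0.1 peer 10.18.0.2

    # forwarding and the NAT rule installed by rc.local
    sysctl net.ipv4.ip_forward         # expect net.ipv4.ip_forward = 1
    iptables -t nat -L POSTROUTING -n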

@ -1,11 +0,0 @@
- hosts: localhost
tasks:
- command: date +%Y%m%d-%H%M%S
register: out
- set_fact:
instance_name: "controller-{{out.stdout}}"
- command: >
gcloud compute instances create {{instance_name}} --can-ip-forward --machine-type=n1-standard-8 --preemptible --scopes=compute-ro --image-project=debian-cloud --image-family=debian-9
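
This deleted playbook created the controller imperatively with the gcloud CLI under a timestamped name; the Terraform configuration below declares the same machine as google_compute_instance.controller instead. Any leftovers from the old naming scheme can be located with a filter such as the following (an illustrative command, not part of the commit):

    gcloud compute instances list --filter='name ~ ^controller-'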

@@ -0,0 +1,143 @@
variable "node-count" {
  default = 0
}

provider "google" {
  project = "mitogen-load-testing"
  region  = "europe-west1"
  zone    = "europe-west1-d"
}

resource "google_compute_instance" "controller" {
  name = "ansible-controller"

  # machine_type = "n1-highcpu-32"
  #machine_type = "f1-micro"
  #machine_type = "custom-4-8192"
  machine_type = "custom-1-1024"

  allow_stopping_for_update = true
  can_ip_forward            = true

  boot_disk {
    initialize_params {
      image = "debian-cloud/debian-9"
    }
  }

  scheduling {
    preemptible       = true
    automatic_restart = false
  }

  network_interface {
    subnetwork    = "${google_compute_subnetwork.loadtest-subnet.self_link}"
    access_config = {}
  }

  provisioner "local-exec" {
    command = "ssh-keygen -R ${google_compute_instance.controller.network_interface.0.access_config.0.nat_ip}"
  }

  provisioner "local-exec" {
    command = "ansible-playbook -i ${google_compute_instance.controller.network_interface.0.access_config.0.nat_ip}, controller.yml"
  }
}

resource "google_compute_network" "loadtest" {
  name                    = "loadtest"
  auto_create_subnetworks = false
}

resource "google_compute_subnetwork" "loadtest-subnet" {
  name          = "loadtest-subnet"
  ip_cidr_range = "10.19.0.0/16"
  network       = "${google_compute_network.loadtest.id}"
}

resource "google_compute_firewall" "allow-all-in" {
  name      = "allow-all-in"
  network   = "${google_compute_network.loadtest.name}"
  direction = "INGRESS"

  allow {
    protocol = "all"
  }
}

resource "google_compute_firewall" "allow-all-out" {
  name      = "allow-all-out"
  network   = "${google_compute_network.loadtest.name}"
  direction = "EGRESS"

  allow {
    protocol = "all"
  }
}

resource "google_compute_route" "route-nodes-via-controller" {
  name                   = "route-nodes-via-controller"
  dest_range             = "0.0.0.0/0"
  network                = "${google_compute_network.loadtest.name}"
  next_hop_instance      = "${google_compute_instance.controller.self_link}"
  next_hop_instance_zone = "${google_compute_instance.controller.zone}"
  priority               = 800
  tags                   = ["node"]
}

resource "google_compute_instance_template" "node" {
  name         = "node"
  tags         = ["node"]
  machine_type = "custom-1-1024"

  scheduling {
    preemptible       = true
    automatic_restart = false
  }

  disk {
    source_image = "debian-cloud/debian-9"
    auto_delete  = true
    boot         = true
  }

  network_interface {
    subnetwork = "${google_compute_subnetwork.loadtest-subnet.self_link}"
  }
}

#
# Compute Engine tops out at 1000 VMs per group.
#

resource "google_compute_instance_group_manager" "nodes-a" {
  name               = "nodes-a"
  base_instance_name = "node"
  instance_template  = "${google_compute_instance_template.node.self_link}"
  target_size        = "${var.node-count / 4}"
}

resource "google_compute_instance_group_manager" "nodes-b" {
  name               = "nodes-b"
  base_instance_name = "node"
  instance_template  = "${google_compute_instance_template.node.self_link}"
  target_size        = "${var.node-count / 4}"
}

resource "google_compute_instance_group_manager" "nodes-c" {
  name               = "nodes-c"
  base_instance_name = "node"
  instance_template  = "${google_compute_instance_template.node.self_link}"
  target_size        = "${var.node-count / 4}"
}

resource "google_compute_instance_group_manager" "nodes-d" {
  name               = "nodes-d"
  base_instance_name = "node"
  instance_template  = "${google_compute_instance_template.node.self_link}"
  target_size        = "${var.node-count / 4}"
}
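
Each managed instance group receives node-count / 4 instances, so node-count is best kept a multiple of four, and with the 1000-VM-per-group limit this layout tops out around 4000 nodes. Scaling up or down is then just a matter of re-applying with a different count; a hedged example with illustrative numbers:

    # 256 nodes total: 64 in each of nodes-a..nodes-d, plus the controller
    terraform apply -var 'node-count=256'

    # scale back to zero nodes while keeping the controller and network
    terraform apply -var 'node-count=0'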