From 29b2c602b6af2e60963e5feb629074e851116144 Mon Sep 17 00:00:00 2001
From: Spencer Smith
Date: Thu, 18 May 2017 17:52:44 -0400
Subject: [PATCH 1/2] Added dynamic inventory for AWS as contrib

---
 contrib/aws_inventory/kargo-aws-inventory.py | 61 ++++++++++++++++++++
 docs/aws.md                                  | 14 +++++
 2 files changed, 75 insertions(+)
 create mode 100755 contrib/aws_inventory/kargo-aws-inventory.py

diff --git a/contrib/aws_inventory/kargo-aws-inventory.py b/contrib/aws_inventory/kargo-aws-inventory.py
new file mode 100755
index 000000000..d379be349
--- /dev/null
+++ b/contrib/aws_inventory/kargo-aws-inventory.py
@@ -0,0 +1,61 @@
+#!/usr/bin/env python
+
+import boto3
+import os
+import argparse
+import json
+
+class SearchEC2Tags(object):
+
+  def __init__(self):
+    self.parse_args()
+    if self.args.list:
+      self.search_tags()
+    if self.args.host:
+      data = {}
+      print(json.dumps(data, indent=2))
+
+  def parse_args(self):
+
+    ##Check if VPC_VISIBILITY is set, if not default to private
+    if "VPC_VISIBILITY" in os.environ:
+      self.vpc_visibility = os.environ['VPC_VISIBILITY']
+    else:
+      self.vpc_visibility = "private"
+
+    ##Support --list and --host flags. We largely ignore the host one.
+    parser = argparse.ArgumentParser()
+    parser.add_argument('--list', action='store_true', default=False, help='List instances')
+    parser.add_argument('--host', action='store_true', help='Get all the variables about a specific instance')
+    self.args = parser.parse_args()
+
+  def search_tags(self):
+    hosts = {}
+    hosts['_meta'] = { 'hostvars': {} }
+
+    ##Search EC2 three times to find nodes of each group type. Relies on the kargo-role key/value.
+    for group in ["kube-master", "kube-node", "etcd"]:
+      hosts[group] = []
+      tag_key = "kargo-role"
+      tag_value = ["*"+group+"*"]
+      region = os.environ['REGION']
+
+      ec2 = boto3.resource('ec2', region)
+
+      instances = ec2.instances.filter(Filters=[{'Name': 'tag:'+tag_key, 'Values': tag_value}, {'Name': 'instance-state-name', 'Values': ['running']}])
+      for instance in instances:
+        if self.vpc_visibility == "public":
+          hosts[group].append(instance.public_dns_name)
+          hosts['_meta']['hostvars'][instance.public_dns_name] = {
+            'ansible_ssh_host': instance.public_ip_address
+          }
+        else:
+          hosts[group].append(instance.private_dns_name)
+          hosts['_meta']['hostvars'][instance.private_dns_name] = {
+            'ansible_ssh_host': instance.private_ip_address
+          }
+
+    hosts['k8s-cluster'] = {'children':['kube-master', 'kube-node']}
+    print(json.dumps(hosts, sort_keys=True, indent=2))
+
+SearchEC2Tags()

diff --git a/docs/aws.md b/docs/aws.md
index b16b8d725..0defdf38b 100644
--- a/docs/aws.md
+++ b/docs/aws.md
@@ -8,3 +8,17 @@ Prior to creating your instances, you **must** ensure that you have created IAM
 The next step is to make sure the hostnames in your `inventory` file are identical to your internal hostnames in AWS. This may look something like `ip-111-222-333-444.us-west-2.compute.internal`. You can then specify how Ansible connects to these instances with `ansible_ssh_host` and `ansible_ssh_user`.
 
 You can now create your cluster!
+
+### Dynamic Inventory ###
+There is also a dynamic inventory script for AWS that can be used if desired. However, be aware that it makes certain assumptions about how you'll create your inventory.
+Guide:
+- Create instances in AWS as needed.
+- Add tags to the instances with a key of `kargo-role` and a value of `kube-master`, `etcd`, or `kube-node`. You can also assign multiple roles to an instance, e.g. `kube-master, etcd`.
+- Copy the `kargo-aws-inventory.py` script from `kargo/contrib/aws_inventory` to the `kargo/inventory` directory.
+- Set the following AWS credentials and info as environment variables in your terminal:
+```
+export AWS_ACCESS_KEY_ID="xxxxx"
+export AWS_SECRET_ACCESS_KEY="yyyyy"
+export REGION="us-east-2"
+```
+- We will now create our cluster. There are one or two small changes: the first is that we specify `-i inventory/kargo-aws-inventory.py` as our inventory script. The other is conditional: if your AWS instances are public facing, you can set the VPC_VISIBILITY variable to public, which will result in public IPs and DNS names being passed into the inventory. This makes your cluster.yml command look like `VPC_VISIBILITY="public" ansible-playbook ... cluster.yml`
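Before handing the script to ansible-playbook, you can exercise it the same way Ansible's dynamic inventory mechanism does, by calling it with `--list`. This is a minimal sketch, assuming the script has already been copied into `inventory/`, the AWS credentials and `REGION` variables from the docs above are exported, and your instances carry the `kargo-role` tags:

```
# Run from the root of the kargo checkout.
# VPC_VISIBILITY defaults to "private" when unset.
./inventory/kargo-aws-inventory.py --list
```

The output should resemble the JSON inventory added in the next patch; empty `kube-master`/`kube-node`/`etcd` groups usually mean the `kargo-role` tags or the `REGION` value don't match your instances.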
From 9231e5c1743e665ff0a8d6c6f0ab7419369e0f24 Mon Sep 17 00:00:00 2001
From: Spencer Smith
Date: Thu, 18 May 2017 17:57:30 -0400
Subject: [PATCH 2/2] Added example json

---
 docs/aws.md | 38 +++++++++++++++++++++++++++++++++++---
 1 file changed, 35 insertions(+), 3 deletions(-)

diff --git a/docs/aws.md b/docs/aws.md
index 0defdf38b..91bded11c 100644
--- a/docs/aws.md
+++ b/docs/aws.md
@@ -10,10 +10,42 @@ The next step is to make sure the hostnames in your `inventory` file are identic
 You can now create your cluster!
 
 ### Dynamic Inventory ###
-There is also a dynamic inventory script for AWS that can be used if desired. However, be aware that it makes certain assumptions about how you'll create your inventory.
+There is also a dynamic inventory script for AWS that can be used if desired. However, be aware that it makes certain assumptions about how you'll create your inventory. It also does not handle all use cases and groups that we may use as part of more advanced deployments. Additions welcome.
+
+This will produce an inventory that is passed to Ansible and looks like the following:
+```
+{
+  "_meta": {
+    "hostvars": {
+      "ip-172-31-3-xxx.us-east-2.compute.internal": {
+        "ansible_ssh_host": "172.31.3.xxx"
+      },
+      "ip-172-31-8-xxx.us-east-2.compute.internal": {
+        "ansible_ssh_host": "172.31.8.xxx"
+      }
+    }
+  },
+  "etcd": [
+    "ip-172-31-3-xxx.us-east-2.compute.internal"
+  ],
+  "k8s-cluster": {
+    "children": [
+      "kube-master",
+      "kube-node"
+    ]
+  },
+  "kube-master": [
+    "ip-172-31-3-xxx.us-east-2.compute.internal"
+  ],
+  "kube-node": [
+    "ip-172-31-8-xxx.us-east-2.compute.internal"
+  ]
+}
+```
+
 Guide:
 - Create instances in AWS as needed.
-- Add tags to the instances with a key of `kargo-role` and a value of `kube-master`, `etcd`, or `kube-node`. You can also assign multiple roles to an instance, e.g. `kube-master, etcd`.
+- Either during or after creation, add tags to the instances with a key of `kargo-role` and a value of `kube-master`, `etcd`, or `kube-node`. You can also assign multiple roles to an instance, e.g. `kube-master, etcd`.
 - Copy the `kargo-aws-inventory.py` script from `kargo/contrib/aws_inventory` to the `kargo/inventory` directory.
 - Set the following AWS credentials and info as environment variables in your terminal:
 ```
@@ -21,4 +53,4 @@ export AWS_ACCESS_KEY_ID="xxxxx"
 export AWS_SECRET_ACCESS_KEY="yyyyy"
 export REGION="us-east-2"
 ```
-- We will now create our cluster. There are one or two small changes: the first is that we specify `-i inventory/kargo-aws-inventory.py` as our inventory script. The other is conditional: if your AWS instances are public facing, you can set the VPC_VISIBILITY variable to public, which will result in public IPs and DNS names being passed into the inventory. This makes your cluster.yml command look like `VPC_VISIBILITY="public" ansible-playbook ... cluster.yml`
+- We will now create our cluster. There are one or two small changes: the first is that we specify `-i inventory/kargo-aws-inventory.py` as our inventory script. The other is conditional: if your AWS instances are public facing, you can set the `VPC_VISIBILITY` variable to `public`, which will result in public IPs and DNS names being passed into the inventory. This makes your cluster.yml command look like `VPC_VISIBILITY="public" ansible-playbook ... cluster.yml`
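For completeness, here is what the pieces from these two patches might look like combined into a single run. This is only a sketch: the `...` in the docs stands for whatever additional ansible-playbook options (SSH user, privilege escalation, and so on) your environment requires, and those are deliberately left out here.

```
export AWS_ACCESS_KEY_ID="xxxxx"
export AWS_SECRET_ACCESS_KEY="yyyyy"
export REGION="us-east-2"

# Instances reachable on private VPC addresses (the default, VPC_VISIBILITY=private):
ansible-playbook -i inventory/kargo-aws-inventory.py cluster.yml

# Public-facing instances:
VPC_VISIBILITY="public" ansible-playbook -i inventory/kargo-aws-inventory.py cluster.yml
```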