Added dynamic inventory for AWS as contrib
This commit is contained in:
parent 06057ed921
commit 29b2c602b6
2 changed files with 75 additions and 0 deletions
contrib/aws_inventory/kargo-aws-inventory.py (executable file, 61 additions)
@@ -0,0 +1,61 @@
#!/usr/bin/env python

import boto3
import os
import argparse
import json

class SearchEC2Tags(object):

  def __init__(self):
    self.parse_args()
    if self.args.list:
      self.search_tags()
    if self.args.host:
      # --host is effectively a no-op: all host variables are already
      # returned under _meta by --list, so we just emit an empty dict.
      data = {}
      print(json.dumps(data, indent=2))

  def parse_args(self):

    ## Check if VPC_VISIBILITY is set; if not, default to private
    if "VPC_VISIBILITY" in os.environ:
      self.vpc_visibility = os.environ['VPC_VISIBILITY']
    else:
      self.vpc_visibility = "private"

    ## Support --list and --host flags. We largely ignore the host one.
    parser = argparse.ArgumentParser()
    parser.add_argument('--list', action='store_true', default=False, help='List instances')
    parser.add_argument('--host', action='store_true', help='Get all the variables about a specific instance')
    self.args = parser.parse_args()

  def search_tags(self):
    hosts = {}
    hosts['_meta'] = { 'hostvars': {} }

    ## Search EC2 three times to find nodes of each group type. Relies on the kargo-role key/value.
    for group in ["kube-master", "kube-node", "etcd"]:
      hosts[group] = []
      tag_key = "kargo-role"
      tag_value = ["*" + group + "*"]
      region = os.environ['REGION']

      ec2 = boto3.resource('ec2', region)

      instances = ec2.instances.filter(Filters=[{'Name': 'tag:' + tag_key, 'Values': tag_value}, {'Name': 'instance-state-name', 'Values': ['running']}])
      for instance in instances:
        if self.vpc_visibility == "public":
          hosts[group].append(instance.public_dns_name)
          hosts['_meta']['hostvars'][instance.public_dns_name] = {
            'ansible_ssh_host': instance.public_ip_address
          }
        else:
          hosts[group].append(instance.private_dns_name)
          hosts['_meta']['hostvars'][instance.private_dns_name] = {
            'ansible_ssh_host': instance.private_ip_address
          }

    hosts['k8s-cluster'] = {'children': ['kube-master', 'kube-node']}
    print(json.dumps(hosts, sort_keys=True, indent=2))

SearchEC2Tags()
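
For reference, the `--list` output of this script takes roughly the following shape (hostnames and addresses are illustrative; this assumes the default private visibility and one instance per role):

```
{
  "_meta": {
    "hostvars": {
      "ip-10-0-1-10.us-east-2.compute.internal": {
        "ansible_ssh_host": "10.0.1.10"
      },
      "ip-10-0-1-20.us-east-2.compute.internal": {
        "ansible_ssh_host": "10.0.1.20"
      }
    }
  },
  "etcd": ["ip-10-0-1-10.us-east-2.compute.internal"],
  "k8s-cluster": {
    "children": ["kube-master", "kube-node"]
  },
  "kube-master": ["ip-10-0-1-10.us-east-2.compute.internal"],
  "kube-node": ["ip-10-0-1-20.us-east-2.compute.internal"]
}
```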
docs/aws.md (14 additions)

@@ -8,3 +8,17 @@ Prior to creating your instances, you **must** ensure that you have created IAM
The next step is to make sure the hostnames in your `inventory` file are identical to your internal hostnames in AWS. This may look something like `ip-111-222-333-444.us-west-2.compute.internal`. You can then specify how Ansible connects to these instances with `ansible_ssh_host` and `ansible_ssh_user`.
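
A minimal sketch of such a static inventory entry, assuming an INI-style inventory (the hostname, address, and user are placeholders matching the example above):

```
[kube-master]
ip-111-222-333-444.us-west-2.compute.internal ansible_ssh_host=111.222.333.444 ansible_ssh_user=admin
```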

You can now create your cluster!

### Dynamic Inventory ###
There is also a dynamic inventory script for AWS that can be used if desired. However, be aware that it makes certain assumptions about how you'll create your inventory.

Guide:

- Create instances in AWS as needed.
- Add tags to the instances with a key of `kargo-role` and a value of `kube-master`, `etcd`, or `kube-node`. An instance can also carry multiple roles by combining them in the tag value, e.g. `kube-master, etcd` (see the tagging sketch after this list).
- Copy the `kargo-aws-inventory.py` script from `kargo/contrib/aws_inventory` to the `kargo/inventory` directory.
- Set the following AWS credentials and info as environment variables in your terminal:

```
export AWS_ACCESS_KEY_ID="xxxxx"
export AWS_SECRET_ACCESS_KEY="yyyyy"
export REGION="us-east-2"
```

- You can now create your cluster, with one or two small changes to the usual command. The first is to specify `-i inventory/kargo-aws-inventory.py` as your inventory script. The second is conditional: if your AWS instances are public facing, you can set the `VPC_VISIBILITY` variable to `public`, which makes the script return public IPs and DNS names in the inventory. Your cluster.yml command then looks like `VPC_VISIBILITY="public" ansible-playbook ... cluster.yml` (a full example invocation follows below).
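
As a sketch of the tagging step, using the standard AWS CLI (the instance ID is a placeholder):

```
# single role
aws ec2 create-tags --resources i-0123456789abcdef0 \
  --tags Key=kargo-role,Value=kube-node

# shared roles need the JSON form, since the tag value itself contains a comma
aws ec2 create-tags --resources i-0123456789abcdef0 \
  --tags '[{"Key": "kargo-role", "Value": "kube-master, etcd"}]'
```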
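
And a full example invocation might look like this (the SSH user and the `-b` privilege-escalation flag are illustrative and depend on your AMI):

```
# private VPC (the default)
ansible-playbook -i inventory/kargo-aws-inventory.py -u admin -b cluster.yml

# public-facing instances
VPC_VISIBILITY="public" ansible-playbook -i inventory/kargo-aws-inventory.py -u admin -b cluster.yml
```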