Merged with upstream master

Anton Nerozya 2017-07-25 16:46:17 +02:00
commit e1992caa8a
39 changed files with 310 additions and 104 deletions

View file

@@ -92,7 +92,7 @@ before_script:
- echo ${PWD}
- echo "${STARTUP_SCRIPT}"
- >
ansible-playbook tests/cloud_playbooks/create-gce.yml -i tests/local_inventory/hosts.cfg -c local
${LOG_LEVEL}
-e cloud_image=${CLOUD_IMAGE}
-e cloud_region=${CLOUD_REGION}
@@ -118,7 +118,7 @@ before_script:
${SSH_ARGS}
${LOG_LEVEL}
-e ansible_python_interpreter=${PYPATH}
-e ansible_ssh_user=${SSH_USER}
-e bootstrap_os=${BOOTSTRAP_OS}
-e cert_management=${CERT_MGMT:-script}
-e cloud_provider=gce
@@ -127,6 +127,7 @@ before_script:
-e download_run_once=${DOWNLOAD_RUN_ONCE}
-e etcd_deployment_type=${ETCD_DEPLOYMENT}
-e kube_network_plugin=${KUBE_NETWORK_PLUGIN}
-e kubedns_min_replicas=1
-e kubelet_deployment_type=${KUBELET_DEPLOYMENT}
-e local_release_dir=${PWD}/downloads
-e resolvconf_mode=${RESOLVCONF_MODE}
@@ -136,30 +137,31 @@ before_script:
# Repeat deployment if testing upgrade
- >
if [ "${UPGRADE_TEST}" != "false" ]; then
test "${UPGRADE_TEST}" == "basic" && PLAYBOOK="cluster.yml";
test "${UPGRADE_TEST}" == "graceful" && PLAYBOOK="upgrade-cluster.yml";
pip install ansible==2.3.0;
git checkout "${CI_BUILD_REF}";
ansible-playbook -i inventory/inventory.ini -b --become-user=root --private-key=${HOME}/.ssh/id_rsa -u $SSH_USER
${SSH_ARGS}
${LOG_LEVEL}
-e ansible_python_interpreter=${PYPATH}
-e ansible_ssh_user=${SSH_USER}
-e bootstrap_os=${BOOTSTRAP_OS}
-e cloud_provider=gce
-e deploy_netchecker=true
-e download_localhost=${DOWNLOAD_LOCALHOST}
-e download_run_once=${DOWNLOAD_RUN_ONCE}
-e etcd_deployment_type=${ETCD_DEPLOYMENT}
-e kube_network_plugin=${KUBE_NETWORK_PLUGIN}
-e kubedns_min_replicas=1
-e kubelet_deployment_type=${KUBELET_DEPLOYMENT}
-e local_release_dir=${PWD}/downloads
-e resolvconf_mode=${RESOLVCONF_MODE}
-e weave_cpu_requests=${WEAVE_CPU_LIMIT}
-e weave_cpu_limit=${WEAVE_CPU_LIMIT}
--limit "all:!fake_hosts"
$PLAYBOOK;
fi
# Test cases
@@ -175,40 +177,41 @@ before_script:
## Idempotency checks 1/5 (repeat deployment)
- >
if [ "${IDEMPOT_CHECK}" = "true" ]; then
ansible-playbook -i inventory/inventory.ini -u $SSH_USER -e ansible_ssh_user=$SSH_USER $SSH_ARGS
-b --become-user=root -e cloud_provider=gce $LOG_LEVEL -e kube_network_plugin=${KUBE_NETWORK_PLUGIN}
--private-key=${HOME}/.ssh/id_rsa
-e bootstrap_os=${BOOTSTRAP_OS}
-e ansible_python_interpreter=${PYPATH}
-e download_localhost=${DOWNLOAD_LOCALHOST}
-e download_run_once=${DOWNLOAD_RUN_ONCE}
-e deploy_netchecker=true
-e resolvconf_mode=${RESOLVCONF_MODE}
-e local_release_dir=${PWD}/downloads
-e etcd_deployment_type=${ETCD_DEPLOYMENT}
-e kubedns_min_replicas=1
-e kubelet_deployment_type=${KUBELET_DEPLOYMENT}
--limit "all:!fake_hosts"
cluster.yml;
fi
## Idempotency checks 2/5 (Advanced DNS checks)
- >
if [ "${IDEMPOT_CHECK}" = "true" ]; then
ansible-playbook -i inventory/inventory.ini -e ansible_python_interpreter=${PYPATH}
-u $SSH_USER -e ansible_ssh_user=$SSH_USER $SSH_ARGS -b --become-user=root
--limit "all:!fake_hosts"
tests/testcases/040_check-network-adv.yml $LOG_LEVEL;
fi
## Idempotency checks 3/5 (reset deployment)
- >
if [ "${IDEMPOT_CHECK}" = "true" ] && [ "${RESET_CHECK}" = "true" ]; then
ansible-playbook -i inventory/inventory.ini -u $SSH_USER -e ansible_ssh_user=$SSH_USER $SSH_ARGS
-b --become-user=root -e cloud_provider=gce $LOG_LEVEL -e kube_network_plugin=${KUBE_NETWORK_PLUGIN}
--private-key=${HOME}/.ssh/id_rsa
-e bootstrap_os=${BOOTSTRAP_OS}
-e ansible_python_interpreter=${PYPATH}
-e reset_confirmation=yes
--limit "all:!fake_hosts"
reset.yml;
fi
@@ -216,28 +219,29 @@ before_script:
## Idempotency checks 4/5 (redeploy after reset)
- >
if [ "${IDEMPOT_CHECK}" = "true" ] && [ "${RESET_CHECK}" = "true" ]; then
ansible-playbook -i inventory/inventory.ini -u $SSH_USER -e ansible_ssh_user=$SSH_USER $SSH_ARGS
-b --become-user=root -e cloud_provider=gce $LOG_LEVEL -e kube_network_plugin=${KUBE_NETWORK_PLUGIN}
--private-key=${HOME}/.ssh/id_rsa
-e bootstrap_os=${BOOTSTRAP_OS}
-e ansible_python_interpreter=${PYPATH}
-e download_localhost=${DOWNLOAD_LOCALHOST}
-e download_run_once=${DOWNLOAD_RUN_ONCE}
-e deploy_netchecker=true
-e resolvconf_mode=${RESOLVCONF_MODE}
-e local_release_dir=${PWD}/downloads
-e etcd_deployment_type=${ETCD_DEPLOYMENT}
-e kubedns_min_replicas=1
-e kubelet_deployment_type=${KUBELET_DEPLOYMENT}
--limit "all:!fake_hosts"
cluster.yml;
fi
## Idempotency checks 5/5 (Advanced DNS checks)
- >
if [ "${IDEMPOT_CHECK}" = "true" ] && [ "${RESET_CHECK}" = "true" ]; then
ansible-playbook -i inventory/inventory.ini -e ansible_python_interpreter=${PYPATH}
-u $SSH_USER -e ansible_ssh_user=$SSH_USER $SSH_ARGS -b --become-user=root
--limit "all:!fake_hosts"
tests/testcases/040_check-network-adv.yml $LOG_LEVEL;
fi
@@ -603,7 +607,7 @@ ci-authorized:
script:
- /bin/sh scripts/premoderator.sh
except: ['triggers', 'master']
syntax-check:
<<: *job
stage: unit-tests

View file

@@ -34,6 +34,7 @@ To deploy the cluster you can use :
* [OpenStack](docs/openstack.md)
* [AWS](docs/aws.md)
* [Azure](docs/azure.md)
+ * [vSphere](docs/vsphere.md)
* [Large deployments](docs/large-deployments.md)
* [Upgrades basics](docs/upgrades.md)
* [Roadmap](docs/roadmap.md)
@@ -52,7 +53,7 @@ Versions of supported components
--------------------------------
- [kubernetes](https://github.com/kubernetes/kubernetes/releases) v1.6.4 <br>
+ [kubernetes](https://github.com/kubernetes/kubernetes/releases) v1.6.7 <br>
[etcd](https://github.com/coreos/etcd/releases) v3.0.17 <br>
[flanneld](https://github.com/coreos/flannel/releases) v0.6.2 <br>
[calicoctl](https://github.com/projectcalico/calico-docker/releases) v0.23.0 <br>

View file

@@ -3,7 +3,7 @@
The Kubespray Project is released on an as-needed basis. The process is as follows:
1. An issue is proposing a new release with a changelog since the last release
2. At least one of the [OWNERS](OWNERS) must LGTM this release
3. An OWNER runs `git tag -s $VERSION` and inserts the changelog and pushes the tag with `git push $VERSION`
4. The release issue is closed
5. An announcement email is sent to `kubernetes-dev@googlegroups.com` with the subject `[ANNOUNCE] Kubespray $VERSION is released`

Vagrantfile vendored
View file

@@ -30,8 +30,6 @@ $os = "ubuntu"
$etcd_instances = $num_instances
# The first two nodes are masters
$kube_master_instances = $num_instances == 1 ? $num_instances : ($num_instances - 1)
- # All nodes are kube nodes
- $kube_node_instances = $num_instances
$local_release_dir = "/vagrant/temp"
host_vars = {}
@@ -40,6 +38,9 @@ if File.exist?(CONFIG)
require CONFIG
end
+ # All nodes are kube nodes
+ $kube_node_instances = $num_instances
$box = SUPPORTED_OS[$os][:box]
# if $inventory is not set, try to use example
$inventory = File.join(File.dirname(__FILE__), "inventory") if ! $inventory

View file

@@ -19,6 +19,8 @@ admin_username: devops
admin_password: changeme
ssh_public_key: "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQDLRzcxbsFDdEibiyXCSdIFh7bKbXso1NqlKjEyPTptf3aBXHEhVil0lJRjGpTlpfTy7PHvXFbXIOCdv9tOmeH1uxWDDeZawgPFV6VSZ1QneCL+8bxzhjiCn8133wBSPZkN8rbFKd9eEUUBfx8ipCblYblF9FcidylwtMt5TeEmXk8yRVkPiCuEYuDplhc2H0f4PsK3pFb5aDVdaDT3VeIypnOQZZoUxHWqm6ThyHrzLJd3SrZf+RROFWW1uInIDf/SZlXojczUYoffxgT1lERfOJCHJXsqbZWugbxQBwqsVsX59+KPxFFo6nV88h3UQr63wbFx52/MXkX4WrCkAHzN ablock-vwfs@dell-lappy"
kube_apiserver_port: 6443
# Azure CIDRs
azure_vnet_cidr: 10.0.0.0/8
azure_admin_cidr: 10.241.2.0/24

View file

@@ -62,8 +62,8 @@
"id": "[concat(variables('lbID'), '/backendAddressPools/kube-api-backend')]"
},
"protocol": "tcp",
- "frontendPort": 443,
- "backendPort": 443,
+ "frontendPort": "{{kube_apiserver_port}}",
+ "backendPort": "{{kube_apiserver_port}}",
"enableFloatingIP": false,
"idleTimeoutInMinutes": 5,
"probe": {
@@ -77,7 +77,7 @@
"name": "kube-api",
"properties": {
"protocol": "tcp",
- "port": 443,
+ "port": "{{kube_apiserver_port}}",
"intervalInSeconds": 5,
"numberOfProbes": 2
}
@@ -193,4 +193,4 @@
} {% if not loop.last %},{% endif %}
{% endfor %}
]
}
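With the port templated, the load-balancer rule, its health probe, and the NSG rule in the next file all track a single variable instead of a hard-coded 443. A minimal sketch of the override in `inventory/group_vars/all.yml` (8443 is an illustrative value; the default set above is 6443):
```yml
# Sketch: change the apiserver port once and the rendered Azure
# templates and the cluster configuration stay in agreement.
kube_apiserver_port: 8443
```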

View file

@@ -92,7 +92,7 @@
"description": "Allow secure kube-api",
"protocol": "Tcp",
"sourcePortRange": "*",
- "destinationPortRange": "443",
+ "destinationPortRange": "{{kube_apiserver_port}}",
"sourceAddressPrefix": "Internet",
"destinationAddressPrefix": "*",
"access": "Allow",
@@ -106,4 +106,4 @@
"dependsOn": []
}
]
}

View file

@@ -36,6 +36,8 @@ Ensure your OpenStack **Identity v2** credentials are loaded in environment variables
$ source ~/.stackrc
```
> You must set the **OS_REGION_NAME** and **OS_TENANT_ID** environment variables, which are not required by the openstack CLI.
You will need two networks before installing: an internal network and
an external (floating IP pool) network. The internal network can be shared, as
we use security groups to provide network segregation. Due to the many
@@ -99,6 +101,46 @@ ssh_user_gfs = "ubuntu"
If these variables are provided, a new ansible group called `gfs-cluster` is created, for which we have added ansible roles to execute in the ansible provisioning step. If you are using Container Linux by CoreOS, these GlusterFS VMs need to be either Debian or RedHat based; Container Linux by CoreOS cannot serve GlusterFS, but it can connect to it through binaries available in hyperkube v1.4.3_coreos.0 or higher.
# Configure Cluster variables
Edit `inventory/group_vars/all.yml`:
- Set the **bootstrap_os** variable according to the selected image:
```
# Valid bootstrap options (required): ubuntu, coreos, centos, none
bootstrap_os: coreos
```
- **bin_dir**
```
# Directory where the binaries will be installed
# Default:
# bin_dir: /usr/local/bin
# For Container Linux by CoreOS:
bin_dir: /opt/bin
```
- and **cloud_provider**
```
cloud_provider: openstack
```
Edit `inventory/group_vars/k8s-cluster.yml`:
- Set the **kube_network_plugin** variable according to the selected networking:
```
# Choose network plugin (calico, weave or flannel)
# Can also be set to 'cloud', which lets the cloud provider setup appropriate routing
kube_network_plugin: flannel
```
> flannel works out-of-the-box
> calico requires allowing the service and pod subnets on the corresponding OpenStack Neutron ports
- Set the **resolvconf_mode** variable:
```
# Can be docker_dns, host_resolvconf or none
# Default:
# resolvconf_mode: docker_dns
# For Container Linux by CoreOS:
resolvconf_mode: host_resolvconf
```
For calico, configure the OpenStack Neutron ports as described in [OpenStack](/docs/openstack.md)
# Provision a Kubernetes Cluster on OpenStack
@@ -156,6 +198,54 @@ Deploy kubernetes:
$ ansible-playbook --become -i contrib/terraform/openstack/hosts cluster.yml
```
# Set up local kubectl
1. Install kubectl on your workstation:
[Install and Set Up kubectl](https://kubernetes.io/docs/tasks/tools/install-kubectl/)
2. Add a route to the internal IP of the master node (if needed):
```
sudo route add [master-internal-ip] gw [router-ip]
```
or
```
sudo route add -net [internal-subnet]/24 gw [router-ip]
```
3. List the Kubernetes certs & keys:
```
ssh [os-user]@[master-ip] sudo ls /etc/kubernetes/ssl/
```
4. Get the admin's certs & key:
```
ssh [os-user]@[master-ip] sudo cat /etc/kubernetes/ssl/admin-[cluster_name]-k8s-master-1-key.pem > admin-key.pem
ssh [os-user]@[master-ip] sudo cat /etc/kubernetes/ssl/admin-[cluster_name]-k8s-master-1.pem > admin.pem
ssh [os-user]@[master-ip] sudo cat /etc/kubernetes/ssl/ca.pem > ca.pem
```
5. Edit OpenStack Neutron master's Security Group to allow TCP connections to port 6443
6. Configure kubectl:
```
kubectl config set-cluster default-cluster --server=https://[master-internal-ip]:6443 \
--certificate-authority=ca.pem
kubectl config set-credentials default-admin \
--certificate-authority=ca.pem \
--client-key=admin-key.pem \
--client-certificate=admin.pem
kubectl config set-context default-system --cluster=default-cluster --user=default-admin
kubectl config use-context default-system
```
7. Check it:
```
kubectl version
```
# What's next
[Start Hello Kubernetes Service](https://kubernetes.io/docs/tasks/access-application-cluster/service-access-application-cluster/)
# Clean up
```

View file

@@ -5,6 +5,10 @@ To deploy kubespray on [AWS](https://aws.amazon.com/) uncomment the `cloud_provider`
Prior to creating your instances, you **must** ensure that you have created IAM roles and policies for both "kubernetes-master" and "kubernetes-node". You can find the IAM policies [here](https://github.com/kubernetes-incubator/kubespray/tree/master/contrib/aws_iam/). See the [IAM Documentation](https://aws.amazon.com/documentation/iam/) if guidance is needed on how to set these up. When you bring your instances online, associate them with the respective IAM role. Nodes that are only to be used for Etcd do not need a role.
You also need to tag the resources in your VPC accordingly for the aws provider to utilize them. Tag the subnets and all instances that kubernetes will run on with the key `kubernetes.io/cluster/$cluster_name` (`$cluster_name` must be a unique identifier for the cluster). Tag the subnets that must be targeted by external ELBs with the key `kubernetes.io/role/elb` and those for internal ELBs with the key `kubernetes.io/role/internal-elb`.
Make sure your VPC has both DNS Hostnames support and Private DNS enabled.
The next step is to make sure the hostnames in your `inventory` file are identical to your internal hostnames in AWS. This may look something like `ip-111-222-333-444.us-west-2.compute.internal`. You can then specify how Ansible connects to these instances with `ansible_ssh_host` and `ansible_ssh_user`.
You can now create your cluster!
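As an illustration, a minimal sketch of such an inventory in Ansible's YAML inventory format (hostnames and addresses are hypothetical):
```yml
# Hostnames must match the AWS internal hostnames; connection details
# are supplied via ansible_ssh_host and ansible_ssh_user.
all:
  hosts:
    ip-10-250-1-10.us-west-2.compute.internal:
      ansible_ssh_host: 203.0.113.10
      ansible_ssh_user: ubuntu
```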

View file

@@ -12,7 +12,7 @@ Etcd
----
The `etcd_access_endpoint` fact provides an access pattern for clients. And the
`etcd_multiaccess` (defaults to `True`) group var controls that behavior.
It makes deployed components access the etcd cluster members
directly: `http://ip1:2379, http://ip2:2379,...`. This mode assumes the clients
do the load balancing and handle HA for connections.
@@ -28,9 +28,9 @@ is less efficient than a dedicated load balancer because it creates extra
health checks on the Kubernetes apiserver, but is more practical for scenarios
where an external LB or virtual IP management is inconvenient. This option is
configured by the variable `loadbalancer_apiserver_localhost` (defaults to `True`).
You may also define the port the local internal loadbalancer uses by changing
`nginx_kube_apiserver_port`. This defaults to the value of `kube_apiserver_port`.
It is also important to note that Kubespray will only configure kubelet and kube-proxy
on non-master nodes to use the local internal loadbalancer.
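A short sketch of these settings in group vars (the port value is illustrative; when unset it falls back to `kube_apiserver_port`):
```yml
# Keep the local nginx proxy on non-master nodes and pin its listen port.
loadbalancer_apiserver_localhost: true
nginx_kube_apiserver_port: 8383
```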
If you choose to NOT use the local internal loadbalancer, you will need to configure

docs/vsphere.md Normal file
View file

@@ -0,0 +1,61 @@
# vSphere cloud provider
Kubespray can be deployed with vSphere as the cloud provider. This feature supports:
- Volumes
- Persistent Volumes
- Storage Classes and provisioning of volumes
- vSphere Storage Policy Based Management for Containers orchestrated by Kubernetes
## Prerequisites
You first need to configure your vSphere environment by following the [official documentation](https://kubernetes.io/docs/getting-started-guides/vsphere/#vsphere-cloud-provider).
After this step you should have:
- UUID activated for each VM where Kubernetes will be deployed
- A vSphere account with the required privileges
## Kubespray configuration
First you must define the cloud provider in `inventory/group_vars/all.yml` and set it to `vsphere`:
```yml
cloud_provider: vsphere
```
Then, in the same file, declare your vCenter credentials following the description below.
| Variable | Required | Type | Choices | Default | Comment |
|------------------------------|----------|---------|----------------------------|---------|-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|
| vsphere_vcenter_ip | TRUE | string | | | IP/URL of the vCenter |
| vsphere_vcenter_port | TRUE | integer | | | Port of the vCenter API. Commonly 443 |
| vsphere_insecure | TRUE | integer | 1, 0 | | Set to 1 if the host above uses a self-signed cert |
| vsphere_user | TRUE | string | | | User name for vCenter with required privileges |
| vsphere_password | TRUE | string | | | Password for vCenter |
| vsphere_datacenter | TRUE | string | | | Datacenter name to use |
| vsphere_datastore | TRUE | string | | | Datastore name to use |
| vsphere_working_dir | TRUE | string | | | Working directory from the view "VMs and template" in the vCenter where VMs are placed |
| vsphere_scsi_controller_type | TRUE | string | buslogic, pvscsi, parallel | pvscsi | SCSI controller name. Commonly "pvscsi". |
| vsphere_vm_uuid | FALSE | string | | | VM Instance UUID of the virtual machine that hosts the K8s master. Can be retrieved from the instanceUuid property in VmConfigInfo, or as vc.uuid in the VMX file, or in `/sys/class/dmi/id/product_serial` |
| vsphere_public_network | FALSE | string | | Blank | Name of the network the VMs are joined to |
Example configuration
```yml
vsphere_vcenter_ip: "myvcenter.domain.com"
vsphere_vcenter_port: 443
vsphere_insecure: 1
vsphere_user: "k8s@vsphere.local"
vsphere_password: "K8s_admin"
vsphere_datacenter: "DATACENTER_name"
vsphere_datastore: "DATASTORE_name"
vsphere_working_dir: "Docker_hosts"
vsphere_scsi_controller_type: "pvscsi"
```
## Deployment
Once the configuration is set, you can execute the playbook again to apply the new configuration:
```
cd kubespray
ansible-playbook -i inventory/inventory.cfg -b -v cluster.yml
```
You'll find some useful examples [here](https://github.com/kubernetes/kubernetes/tree/master/examples/volumes/vsphere) to test your configuration.

View file

@@ -98,3 +98,6 @@ bin_dir: /usr/local/bin
## Please specify true if you want to perform a kernel upgrade
kernel_upgrade: false
## Etcd auto compaction retention for mvcc key value store in hours
#etcd_compaction_retention: 0

View file

@@ -23,7 +23,7 @@ kube_users_dir: "{{ kube_config_dir }}/users"
kube_api_anonymous_auth: false
## Change this to use another Kubernetes version, e.g. a current beta release
- kube_version: v1.6.4
+ kube_version: v1.6.7
# Where the binaries will be downloaded.
# Note: ensure that you've enough disk space (about 1G)
@@ -39,6 +39,7 @@ kube_cert_group: kube-cert
kube_log_level: 2
# Users to create for basic auth in Kubernetes API via HTTP
# Optionally add groups for user
kube_api_pwd: "changeme"
kube_users:
kube:
@@ -47,6 +48,8 @@ kube_users:
root:
pass: "{{kube_api_pwd}}"
role: admin
# groups:
# - system:masters
@@ -71,6 +74,7 @@ kube_users:
# Can also be set to 'cloud', which lets the cloud provider setup appropriate routing
kube_network_plugin: calico
# Enable kubernetes network policies
enable_network_policy: false
@@ -156,3 +160,8 @@ helm_enabled: false
# -----BEGIN RSA PRIVATE KEY-----
# ...
# -----END RSA PRIVATE KEY-----
# dnsmasq
# dnsmasq_upstream_dns_servers:
# - /resolvethiszone.with/10.0.4.250
# - 8.8.8.8

View file

@@ -66,7 +66,7 @@ options:
description:
- present handles checking existence or creating if definition file provided,
absent handles deleting resource(s) based on other options,
latest handles creating or updating based on existence,
reloaded handles updating resource(s) definition using definition file,
stopped handles stopping resource(s) based on other options.
requirements:

View file

@@ -30,3 +30,6 @@ dns_memory_requests: 50Mi
# Autoscaler parameters
dnsmasq_nodes_per_replica: 10
dnsmasq_min_replicas: 1
# Custom name servers
dnsmasq_upstream_dns_servers: []
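For example, a sketch of an override (mirroring the commented example added to the group vars above) that forwards one zone to an internal resolver and everything else to a public one:
```yml
# Rendered by the template below as dnsmasq "server=" entries.
dnsmasq_upstream_dns_servers:
  - /resolvethiszone.with/10.0.4.250
  - 8.8.8.8
```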

View file

@@ -11,6 +11,11 @@ server=/{{ dns_domain }}/{{ skydns_server }}
local=/{{ bogus_domains }}
#Set upstream dns servers
{% if dnsmasq_upstream_dns_servers|length > 0 %}
{% for srv in dnsmasq_upstream_dns_servers %}
server={{ srv }}
{% endfor %}
{% endif %}
{% if system_and_upstream_dns_servers|length > 0 %}
{% for srv in system_and_upstream_dns_servers %}
server={{ srv }}

View file

@@ -18,7 +18,7 @@ download_localhost: False
download_always_pull: False
# Versions
- kube_version: v1.6.4
+ kube_version: v1.6.7
etcd_version: v3.0.17
#TODO(mattymo): Move calico versions to roles/network_plugins/calico/defaults
# after migration to container download

View file

@@ -22,3 +22,5 @@ etcd_memory_limit: 512M
#etcd_cpu_limit: 300m
etcd_node_cert_hosts: "{{ groups['k8s-cluster'] | union(groups.get('calico-rr', [])) }}"
etcd_compaction_retention: "0"
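This feeds `ETCD_AUTO_COMPACTION_RETENTION` in the `etcd.env` template further down. A sketch of a group-vars override (the value is illustrative; `"0"`, the default, disables auto-compaction):
```yml
# Keep roughly 24 hours of MVCC key history before compacting.
etcd_compaction_retention: "24"
```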

View file

@@ -15,8 +15,8 @@ ExecStart=/usr/bin/rkt run \
--mount=volume=etc-ssl-certs,target=/etc/ssl/certs \
--volume=etcd-cert-dir,kind=host,source={{ etcd_cert_dir }},readOnly=true \
--mount=volume=etcd-cert-dir,target={{ etcd_cert_dir }} \
- --volume=var-lib-etcd,kind=host,source={{ etcd_data_dir }},readOnly=false \
- --mount=volume=var-lib-etcd,target=/var/lib/etcd \
+ --volume=etcd-data-dir,kind=host,source={{ etcd_data_dir }},readOnly=false \
+ --mount=volume=etcd-data-dir,target={{ etcd_data_dir }} \
--set-env-file=/etc/etcd.env \
--stage1-from-dir=stage1-fly.aci \
{{ etcd_image_repo }}:{{ etcd_image_tag }} \

View file

@@ -12,6 +12,7 @@ ETCD_LISTEN_PEER_URLS=https://{{ etcd_address }}:2380
ETCD_NAME={{ etcd_member_name }}
ETCD_PROXY=off
ETCD_INITIAL_CLUSTER={{ etcd_peer_addresses }}
ETCD_AUTO_COMPACTION_RETENTION={{ etcd_compaction_retention }}
# TLS settings
ETCD_TRUSTED_CA_FILE={{ etcd_cert_dir }}/ca.pem

View file

@@ -5,7 +5,7 @@
--net=host \
-v /etc/ssl/certs:/etc/ssl/certs:ro \
-v {{ etcd_cert_dir }}:{{ etcd_cert_dir }}:ro \
- -v {{ etcd_data_dir }}:/var/lib/etcd:rw \
+ -v {{ etcd_data_dir }}:{{ etcd_data_dir }}:rw \
{% if etcd_memory_limit is defined %}
--memory={{ etcd_memory_limit|regex_replace('Mi', 'M') }} \
{% endif %}

View file

@@ -6,7 +6,7 @@ kubednsautoscaler_version: 1.1.1
dns_memory_limit: 170Mi
dns_cpu_requests: 100m
dns_memory_requests: 70Mi
- kubedns_min_replicas: 1
+ kubedns_min_replicas: 2
kubedns_nodes_per_replica: 10
# Images
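The autoscaler sizes kubedns from the node count but never below `kubedns_min_replicas`, so raising the floor to 2 keeps DNS available if one replica dies; the CI jobs above pass `-e kubedns_min_replicas=1` to keep their small test clusters lean. A sketch of the same override for a resource-constrained cluster:
```yml
# Single kubedns replica; trades DNS redundancy for fewer pods.
kubedns_min_replicas: 1
```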

View file

@@ -1,7 +1,7 @@
---
- name: Kubernetes Apps | Wait for kube-apiserver
uri:
- url: http://localhost:8080/healthz
+ url: http://localhost:{{ kube_apiserver_insecure_port }}/healthz
register: result
until: result.status == 200
ignore_errors: "{{ ansible_check_mode }}"

View file

@@ -41,7 +41,7 @@
- name: Master | wait for the apiserver to be running
uri:
- url: http://localhost:8080/healthz
+ url: http://localhost:{{ kube_apiserver_insecure_port }}/healthz
register: result
until: result.status == 200
retries: 20

View file

@@ -92,7 +92,7 @@ spec:
httpGet:
host: 127.0.0.1
path: /healthz
- port: 8080
+ port: {{ kube_apiserver_insecure_port }}
initialDelaySeconds: 30
timeoutSeconds: 10
volumeMounts:
@@ -124,4 +124,4 @@ spec:
- hostPath:
path: /etc/ssl/certs/ca-bundle.crt
name: rhel-ca-bundle
{% endif %}

View file

@@ -14,7 +14,11 @@ KUBELET_HOSTNAME="--hostname-override={{ ansible_hostname }}"
--kube-reserved cpu={{ kubelet_cpu_limit }},memory={{ kubelet_memory_limit|regex_replace('Mi', 'M') }} \
--node-status-update-frequency={{ kubelet_status_update_frequency }} \
{% if kube_version | version_compare('v1.6', '>=') %}
- --enable-cri={{ kubelet_enable_cri }} --cgroups-per-qos={{ kubelet_cgroups_per_qos }} \
+ {# flag got removed with 1.7.0 #}
+ {% if kube_version | version_compare('v1.7', '<') %}
+ --enable-cri={{ kubelet_enable_cri }} \
+ {% endif %}
+ --cgroups-per-qos={{ kubelet_cgroups_per_qos }} \
--enforce-node-allocatable='{{ kubelet_enforce_node_allocatable }}' {% endif %}{% endset %}
{# DNS settings for kubelet #}

View file

@@ -30,7 +30,7 @@ ExecStart=/usr/bin/rkt run \
--volume {{ dir | regex_replace('^/(.*)$', '\\1' ) | regex_replace('/', '-') }},kind=host,source={{ dir }},readOnly=true \
{% endfor -%}
--volume var-lib-docker,kind=host,source={{ docker_daemon_graph }},readOnly=false \
- --volume var-lib-kubelet,kind=host,source=/var/lib/kubelet,readOnly=false \
+ --volume var-lib-kubelet,kind=host,source=/var/lib/kubelet,readOnly=false,recursive=true \
--volume var-log,kind=host,source=/var/log \
{% if kube_network_plugin in ["calico", "weave", "canal"] %}
--volume etc-cni,kind=host,source=/etc/cni,readOnly=true \

View file

@@ -32,7 +32,7 @@ openstack_domain_name: "{{ lookup('env','OS_USER_DOMAIN_NAME') }}"
# For the vsphere integration, kubelet will need credentials to access
# vsphere apis
# Documentation regarding these values can be found
# https://github.com/kubernetes/kubernetes/blob/master/pkg/cloudprovider/providers/vsphere/vsphere.go#L105
vsphere_vcenter_ip: "{{ lookup('env', 'VSPHERE_VCENTER') }}"
vsphere_vcenter_port: "{{ lookup('env', 'VSPHERE_VCENTER_PORT') }}"
@@ -49,3 +49,6 @@ vsphere_public_network: "{{ lookup('env', 'VSPHERE_PUBLIC_NETWORK')|default('') }}"
# Container Linux by CoreOS cloud init config file to define /etc/resolv.conf content
# for hostnet pods and infra needs
resolveconf_cloud_init_conf: /etc/resolveconf_cloud_init.conf
# All inventory hostnames will be written into each /etc/hosts file.
populate_inventory_to_hosts_file: true

View file

@@ -9,6 +9,7 @@
create: yes
backup: yes
marker: "# Ansible inventory hosts {mark}"
when: populate_inventory_to_hosts_file
- name: Hosts | populate kubernetes loadbalancer address into hosts file
lineinfile:
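Since the task is now gated on `populate_inventory_to_hosts_file` (default `true`, per the preinstall defaults above), an inventory that manages `/etc/hosts` by other means can opt out; a sketch:
```yml
# Skip writing every inventory hostname into /etc/hosts on each node.
populate_inventory_to_hosts_file: false
```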

View file

@@ -20,7 +20,7 @@
- set_fact:
kube_apiserver_endpoint: |-
- {% if not is_kube_master and loadbalancer_apiserver_localhost|default(false) -%}
+ {% if not is_kube_master and loadbalancer_apiserver_localhost|default(true) -%}
https://localhost:{{ nginx_kube_apiserver_port|default(kube_apiserver_port) }}
{%- elif is_kube_master -%}
http://127.0.0.1:{{ kube_apiserver_insecure_port }}

View file

@@ -27,12 +27,10 @@
group: "{{ kube_cert_group }}"
- name: Populate users for basic auth in API
- lineinfile:
+ template:
+ src: known_users.csv.j2
dest: "{{ kube_users_dir }}/known_users.csv"
- create: yes
- line: '{{ item.value.pass }},{{ item.key }},{{ item.value.role }}'
backup: yes
- with_dict: "{{ kube_users }}"
when: inventory_hostname in "{{ groups['kube-master'] }}" and kube_basic_auth|default(true)
notify: set secret_changed

View file

@@ -0,0 +1,3 @@
{% for user in kube_users %}
{{kube_users[user].pass}},{{user}},{{kube_users[user].role}}{% if kube_users[user].groups is defined %},{% set groups_csv = kube_users[user].groups|join(',') -%}"{{groups_csv}}"{% endif %}
{% endfor %}
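To illustrate the template, a hypothetical `kube_users` entry using the optional `groups` key (mirroring the commented example in the group vars above):
```yml
# Renders as: changeme2,kube,admin,"system:masters"
kube_users:
  kube:
    pass: "changeme2"
    role: admin
    groups:
      - system:masters
```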

View file

@@ -4,7 +4,7 @@ bootstrap_os: none
kube_api_anonymous_auth: false
## Change this to use another Kubernetes version, e.g. a current beta release
- kube_version: v1.6.4
+ kube_version: v1.6.7
# Directory where the binaries will be installed
bin_dir: /usr/local/bin
@@ -22,7 +22,7 @@ cluster_name: cluster.local
# Subdomains of DNS domain to be resolved via /etc/resolv.conf for hostnet pods
ndots: 2
# Can be dnsmasq_kubedns, kubedns or none
- dns_mode: dnsmasq_kubedns
+ dns_mode: kubedns
# Can be docker_dns, host_resolvconf or none
resolvconf_mode: docker_dns
# Deploy netchecker app to verify DNS resolve as an HTTP service
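Clusters that still want dnsmasq in front of kubedns can revert the new default with a one-line override, e.g.:
```yml
# Restore the previous two-tier DNS resolution chain.
dns_mode: dnsmasq_kubedns
```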

View file

@@ -3,7 +3,8 @@
nat_outgoing: true
# Use IP-over-IP encapsulation across hosts
- ipip: false
+ ipip: true
+ ipip_mode: always # change to "cross-subnet" if you only want ipip encapsulation on traffic going across subnets
# Set to true if you want your calico cni binaries to overwrite the
# ones from hyperkube while leaving other cni plugins intact.
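Following the comment above, a sketch of the cross-subnet variant, which skips encapsulation between hosts that share an L2 subnet:
```yml
# Encapsulate only pod traffic that crosses subnet boundaries.
ipip: true
ipip_mode: cross-subnet
```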

View file

@@ -97,7 +97,7 @@
shell: >
echo '{
"kind": "ipPool",
- "spec": {"disabled": false, "ipip": {"enabled": {{ cloud_provider is defined or ipip }}},
+ "spec": {"disabled": false, "ipip": {"enabled": {{ ipip }}, "mode": "{{ ipip_mode }}"},
"nat-outgoing": {{ nat_outgoing|default(false) and not peer_with_router|default(false) }}},
"apiVersion": "v1",
"metadata": {"cidr": "{{ kube_pods_subnet }}"}
@@ -114,8 +114,7 @@
run_once: true
set_fact:
ipip_arg: "--ipip"
- when: (legacy_calicoctl and
- cloud_provider is defined or ipip)
+ when: (legacy_calicoctl and ipip)
tags: facts
- name: Calico (old) | Define nat-outgoing pool argument

View file

@@ -83,6 +83,15 @@
- /etc/dhcp/dhclient.d/zdnsupdate.sh
- /etc/dhcp/dhclient-exit-hooks.d/zdnsupdate
- "{{ bin_dir }}/kubelet"
- "{{ bin_dir }}/kubernetes-scripts"
- /run/flannel
- /etc/flannel
- /run/kubernetes
- /usr/local/share/ca-certificates/kube-ca.crt
- /usr/local/share/ca-certificates/etcd-ca.crt
- /etc/ssl/certs/kube-ca.pem
- /etc/ssl/certs/etcd-ca.pem
- /var/log/pods/
tags: ['files']

View file

@@ -3,4 +3,5 @@
- name: Uncordon node
command: "{{ bin_dir }}/kubectl uncordon {{ inventory_hostname }}"
delegate_to: "{{ groups['kube-master'][0] }}"
- when: needs_cordoning|default(false)
+ when: (needs_cordoning|default(false)) and ( {%- if inventory_hostname in groups['kube-node'] -%} true {%- else -%} false {%- endif -%} )

View file

@@ -1,3 +1,3 @@
- drain_grace_period: 30
- drain_timeout: 40s
+ drain_grace_period: 90
+ drain_timeout: 120s

View file

@@ -67,6 +67,7 @@
- { role: kubernetes/node, tags: node }
- { role: kubernetes/master, tags: master }
- { role: network_plugin, tags: network }
- { role: upgrade/post-upgrade, tags: post-upgrade }
#Finally handle worker upgrades, based on given batch size
- hosts: kube-node:!kube-master