Merged with upstream master
Commit e1992caa8a — 39 changed files with 310 additions and 104 deletions
.gitlab-ci.yml (130 changes)

@@ -92,7 +92,7 @@ before_script:
    - echo ${PWD}
    - echo "${STARTUP_SCRIPT}"
    - >
      ansible-playbook tests/cloud_playbooks/create-gce.yml -i tests/local_inventory/hosts.cfg -c local
      ${LOG_LEVEL}
      -e cloud_image=${CLOUD_IMAGE}
      -e cloud_region=${CLOUD_REGION}
@@ -118,7 +118,7 @@ before_script:
      ${SSH_ARGS}
      ${LOG_LEVEL}
      -e ansible_python_interpreter=${PYPATH}
      -e ansible_ssh_user=${SSH_USER}
      -e bootstrap_os=${BOOTSTRAP_OS}
      -e cert_management=${CERT_MGMT:-script}
      -e cloud_provider=gce
@@ -127,6 +127,7 @@ before_script:
      -e download_run_once=${DOWNLOAD_RUN_ONCE}
      -e etcd_deployment_type=${ETCD_DEPLOYMENT}
      -e kube_network_plugin=${KUBE_NETWORK_PLUGIN}
+     -e kubedns_min_replicas=1
      -e kubelet_deployment_type=${KUBELET_DEPLOYMENT}
      -e local_release_dir=${PWD}/downloads
      -e resolvconf_mode=${RESOLVCONF_MODE}
@@ -136,30 +137,31 @@ before_script:

    # Repeat deployment if testing upgrade
    - >
      if [ "${UPGRADE_TEST}" != "false" ]; then
      test "${UPGRADE_TEST}" == "basic" && PLAYBOOK="cluster.yml";
      test "${UPGRADE_TEST}" == "graceful" && PLAYBOOK="upgrade-cluster.yml";
      pip install ansible==2.3.0;
      git checkout "${CI_BUILD_REF}";
      ansible-playbook -i inventory/inventory.ini -b --become-user=root --private-key=${HOME}/.ssh/id_rsa -u $SSH_USER
      ${SSH_ARGS}
      ${LOG_LEVEL}
      -e ansible_python_interpreter=${PYPATH}
      -e ansible_ssh_user=${SSH_USER}
      -e bootstrap_os=${BOOTSTRAP_OS}
      -e cloud_provider=gce
      -e deploy_netchecker=true
      -e download_localhost=${DOWNLOAD_LOCALHOST}
      -e download_run_once=${DOWNLOAD_RUN_ONCE}
      -e etcd_deployment_type=${ETCD_DEPLOYMENT}
      -e kube_network_plugin=${KUBE_NETWORK_PLUGIN}
+     -e kubedns_min_replicas=1
      -e kubelet_deployment_type=${KUBELET_DEPLOYMENT}
      -e local_release_dir=${PWD}/downloads
      -e resolvconf_mode=${RESOLVCONF_MODE}
      -e weave_cpu_requests=${WEAVE_CPU_LIMIT}
      -e weave_cpu_limit=${WEAVE_CPU_LIMIT}
      --limit "all:!fake_hosts"
      $PLAYBOOK;
      fi

    # Tests Cases
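Editor's note: the `-e kubedns_min_replicas=1` extra-var added above (and repeated in the idempotency invocations below) pins CI clusters to one kube-dns replica even though this commit bumps the role default to 2; extra-vars passed with `-e` take the highest variable precedence in Ansible. A minimal sketch of the same override:

```sh
# -e extra-vars beat role defaults and group_vars, so a single run can
# keep one kube-dns replica while the shipped default moves to 2:
ansible-playbook -i inventory/inventory.ini -e kubedns_min_replicas=1 cluster.yml
```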
@@ -175,40 +177,41 @@ before_script:
    ## Idempotency checks 1/5 (repeat deployment)
    - >
      if [ "${IDEMPOT_CHECK}" = "true" ]; then
      ansible-playbook -i inventory/inventory.ini -u $SSH_USER -e ansible_ssh_user=$SSH_USER $SSH_ARGS
      -b --become-user=root -e cloud_provider=gce $LOG_LEVEL -e kube_network_plugin=${KUBE_NETWORK_PLUGIN}
      --private-key=${HOME}/.ssh/id_rsa
      -e bootstrap_os=${BOOTSTRAP_OS}
      -e ansible_python_interpreter=${PYPATH}
      -e download_localhost=${DOWNLOAD_LOCALHOST}
      -e download_run_once=${DOWNLOAD_RUN_ONCE}
      -e deploy_netchecker=true
      -e resolvconf_mode=${RESOLVCONF_MODE}
      -e local_release_dir=${PWD}/downloads
      -e etcd_deployment_type=${ETCD_DEPLOYMENT}
+     -e kubedns_min_replicas=1
      -e kubelet_deployment_type=${KUBELET_DEPLOYMENT}
      --limit "all:!fake_hosts"
      cluster.yml;
      fi

    ## Idempotency checks 2/5 (Advanced DNS checks)
    - >
      if [ "${IDEMPOT_CHECK}" = "true" ]; then
      ansible-playbook -i inventory/inventory.ini -e ansible_python_interpreter=${PYPATH}
      -u $SSH_USER -e ansible_ssh_user=$SSH_USER $SSH_ARGS -b --become-user=root
      --limit "all:!fake_hosts"
      tests/testcases/040_check-network-adv.yml $LOG_LEVEL;
      fi

    ## Idempotency checks 3/5 (reset deployment)
    - >
      if [ "${IDEMPOT_CHECK}" = "true" AND "${RESET_CHECK}" = "true" ]; then
      ansible-playbook -i inventory/inventory.ini -u $SSH_USER -e ansible_ssh_user=$SSH_USER $SSH_ARGS
      -b --become-user=root -e cloud_provider=gce $LOG_LEVEL -e kube_network_plugin=${KUBE_NETWORK_PLUGIN}
      --private-key=${HOME}/.ssh/id_rsa
      -e bootstrap_os=${BOOTSTRAP_OS}
      -e ansible_python_interpreter=${PYPATH}
      -e reset_confirmation=yes
      --limit "all:!fake_hosts"
      reset.yml;
      fi
@@ -216,28 +219,29 @@ before_script:
    ## Idempotency checks 4/5 (redeploy after reset)
    - >
      if [ "${IDEMPOT_CHECK}" = "true" AND "${RESET_CHECK}" = "true" ]; then
      ansible-playbook -i inventory/inventory.ini -u $SSH_USER -e ansible_ssh_user=$SSH_USER $SSH_ARGS
      -b --become-user=root -e cloud_provider=gce $LOG_LEVEL -e kube_network_plugin=${KUBE_NETWORK_PLUGIN}
      --private-key=${HOME}/.ssh/id_rsa
      -e bootstrap_os=${BOOTSTRAP_OS}
      -e ansible_python_interpreter=${PYPATH}
      -e download_localhost=${DOWNLOAD_LOCALHOST}
      -e download_run_once=${DOWNLOAD_RUN_ONCE}
      -e deploy_netchecker=true
      -e resolvconf_mode=${RESOLVCONF_MODE}
      -e local_release_dir=${PWD}/downloads
      -e etcd_deployment_type=${ETCD_DEPLOYMENT}
+     -e kubedns_min_replicas=1
      -e kubelet_deployment_type=${KUBELET_DEPLOYMENT}
      --limit "all:!fake_hosts"
      cluster.yml;
      fi

    ## Idempotency checks 5/5 (Advanced DNS checks)
    - >
      if [ "${IDEMPOT_CHECK}" = "true" AND "${RESET_CHECK}" = "true" ]; then
      ansible-playbook -i inventory/inventory.ini -e ansible_python_interpreter=${PYPATH}
      -u $SSH_USER -e ansible_ssh_user=$SSH_USER $SSH_ARGS -b --become-user=root
      --limit "all:!fake_hosts"
      tests/testcases/040_check-network-adv.yml $LOG_LEVEL;
      fi
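Editor's note: `AND` is not an operator recognized by the shell `[`/`test` builtin, so the guards in idempotency checks 3/5 through 5/5 above fail with "too many arguments" at runtime and the guarded blocks never execute. A POSIX-correct sketch of the intended guard:

```sh
# Chain two separate test commands with && instead of an AND token:
if [ "${IDEMPOT_CHECK}" = "true" ] && [ "${RESET_CHECK}" = "true" ]; then
  echo "reset / redeploy / network checks would run here"
fi
```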
@@ -603,7 +607,7 @@ ci-authorized:
  script:
    - /bin/sh scripts/premoderator.sh
  except: ['triggers', 'master']

syntax-check:
  <<: *job
  stage: unit-tests
README.md

@@ -34,6 +34,7 @@ To deploy the cluster you can use :
  * [OpenStack](docs/openstack.md)
  * [AWS](docs/aws.md)
  * [Azure](docs/azure.md)
+ * [vSphere](docs/vsphere.md)
  * [Large deployments](docs/large-deployments.md)
  * [Upgrades basics](docs/upgrades.md)
  * [Roadmap](docs/roadmap.md)

@@ -52,7 +53,7 @@ Versions of supported components
  --------------------------------

- [kubernetes](https://github.com/kubernetes/kubernetes/releases) v1.6.4 <br>
+ [kubernetes](https://github.com/kubernetes/kubernetes/releases) v1.6.7 <br>
  [etcd](https://github.com/coreos/etcd/releases) v3.0.17 <br>
  [flanneld](https://github.com/coreos/flannel/releases) v0.6.2 <br>
  [calicoctl](https://github.com/projectcalico/calico-docker/releases) v0.23.0 <br>
RELEASE.md

@@ -3,7 +3,7 @@
  The Kubespray Project is released on an as-needed basis. The process is as follows:

  1. An issue is proposing a new release with a changelog since the last release
- 2. At least on of the [OWNERS](OWNERS) must LGTM this release
+ 2. At least one of the [OWNERS](OWNERS) must LGTM this release
  3. An OWNER runs `git tag -s $VERSION` and inserts the changelog and pushes the tag with `git push $VERSION`
  4. The release issue is closed
  5. An announcement email is sent to `kubernetes-dev@googlegroups.com` with the subject `[ANNOUNCE] Kubespray $VERSION is released`
Vagrantfile (vendored, 5 changes)

@@ -30,8 +30,6 @@ $os = "ubuntu"
  $etcd_instances = $num_instances
  # The first two nodes are masters
  $kube_master_instances = $num_instances == 1 ? $num_instances : ($num_instances - 1)
- # All nodes are kube nodes
- $kube_node_instances = $num_instances
  $local_release_dir = "/vagrant/temp"

  host_vars = {}

@@ -40,6 +38,9 @@ if File.exist?(CONFIG)
  require CONFIG
  end

+ # All nodes are kube nodes
+ $kube_node_instances = $num_instances
+
  $box = SUPPORTED_OS[$os][:box]
  # if $inventory is not set, try to use example
  $inventory = File.join(File.dirname(__FILE__), "inventory") if ! $inventory
(Azure group vars)

@@ -19,6 +19,8 @@ admin_username: devops
  admin_password: changeme
  ssh_public_key: "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQDLRzcxbsFDdEibiyXCSdIFh7bKbXso1NqlKjEyPTptf3aBXHEhVil0lJRjGpTlpfTy7PHvXFbXIOCdv9tOmeH1uxWDDeZawgPFV6VSZ1QneCL+8bxzhjiCn8133wBSPZkN8rbFKd9eEUUBfx8ipCblYblF9FcidylwtMt5TeEmXk8yRVkPiCuEYuDplhc2H0f4PsK3pFb5aDVdaDT3VeIypnOQZZoUxHWqm6ThyHrzLJd3SrZf+RROFWW1uInIDf/SZlXojczUYoffxgT1lERfOJCHJXsqbZWugbxQBwqsVsX59+KPxFFo6nV88h3UQr63wbFx52/MXkX4WrCkAHzN ablock-vwfs@dell-lappy"

+ kube_apiserver_port: 6443
+
  # Azure CIDRs
  azure_vnet_cidr: 10.0.0.0/8
  azure_admin_cidr: 10.241.2.0/24
(Azure load balancer template)

@@ -62,8 +62,8 @@
  "id": "[concat(variables('lbID'), '/backendAddressPools/kube-api-backend')]"
  },
  "protocol": "tcp",
- "frontendPort": 443,
- "backendPort": 443,
+ "frontendPort": "{{kube_apiserver_port}}",
+ "backendPort": "{{kube_apiserver_port}}",
  "enableFloatingIP": false,
  "idleTimeoutInMinutes": 5,
  "probe": {

@@ -77,7 +77,7 @@
  "name": "kube-api",
  "properties": {
  "protocol": "tcp",
- "port": 443,
+ "port": "{{kube_apiserver_port}}",
  "intervalInSeconds": 5,
  "numberOfProbes": 2
  }

@@ -193,4 +193,4 @@
  } {% if not loop.last %},{% endif %}
  {% endfor %}
  ]
  }
(Azure network security group template)

@@ -92,7 +92,7 @@
  "description": "Allow secure kube-api",
  "protocol": "Tcp",
  "sourcePortRange": "*",
- "destinationPortRange": "443",
+ "destinationPortRange": "{{kube_apiserver_port}}",
  "sourceAddressPrefix": "Internet",
  "destinationAddressPrefix": "*",
  "access": "Allow",

@@ -106,4 +106,4 @@
  "dependsOn": []
  }
  ]
  }
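Both Azure templates above swap the hard-coded 443 for `{{kube_apiserver_port}}` (defaulted to 6443 in the group vars hunk earlier), keeping the load balancer rule, health probe, and NSG rule in sync with the apiserver. A quick reachability check against the new port, with a hypothetical LB address:

```sh
# -k skips CA verification; fine for a connectivity probe only.
curl -k "https://[loadbalancer-ip]:6443/healthz"
```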
contrib/terraform/openstack/README.md

@@ -36,6 +36,8 @@ Ensure your OpenStack **Identity v2** credentials are loaded in environment variables:
  $ source ~/.stackrc
  ```

+ > You must also set the **OS_REGION_NAME** and **OS_TENANT_ID** environment variables, which are not required by the openstack CLI
+
  You will need two networks before installing: an internal network and
  an external (floating IP pool) network. The internal network can be shared, as
  we use security groups to provide network segregation. Due to the many

@@ -99,6 +101,46 @@ ssh_user_gfs = "ubuntu"

  If these variables are provided, this will give rise to a new ansible group called `gfs-cluster`, for which we have added ansible roles to execute in the ansible provisioning step. If you are using Container Linux by CoreOS, these GlusterFS VMs necessarily need to be either Debian or RedHat based VMs; Container Linux by CoreOS cannot serve GlusterFS, but can connect to it through binaries available on hyperkube v1.4.3_coreos.0 or higher.

+ # Configure cluster variables
+
+ Edit `inventory/group_vars/all.yml`:
+ - Set the variable **bootstrap_os** according to the selected image
+ ```
+ # Valid bootstrap options (required): ubuntu, coreos, centos, none
+ bootstrap_os: coreos
+ ```
+ - **bin_dir**
+ ```
+ # Directory where the binaries will be installed
+ # Default:
+ # bin_dir: /usr/local/bin
+ # For Container Linux by CoreOS:
+ bin_dir: /opt/bin
+ ```
+ - and **cloud_provider**
+ ```
+ cloud_provider: openstack
+ ```
+ Edit `inventory/group_vars/k8s-cluster.yml`:
+ - Set the variable **kube_network_plugin** according to the selected networking
+ ```
+ # Choose network plugin (calico, weave or flannel)
+ # Can also be set to 'cloud', which lets the cloud provider setup appropriate routing
+ kube_network_plugin: flannel
+ ```
+ > flannel works out-of-the-box
+
+ > calico requires allowing the service and pod subnets on the corresponding OpenStack Neutron ports
+ - Set the variable **resolvconf_mode**
+ ```
+ # Can be docker_dns, host_resolvconf or none
+ # Default:
+ # resolvconf_mode: docker_dns
+ # For Container Linux by CoreOS:
+ resolvconf_mode: host_resolvconf
+ ```
+
+ For calico, configure the OpenStack Neutron ports: [OpenStack](/docs/openstack.md)
+
  # Provision a Kubernetes Cluster on OpenStack

@@ -156,6 +198,54 @@ Deploy kubernetes:
  $ ansible-playbook --become -i contrib/terraform/openstack/hosts cluster.yml
  ```

+ # Set up local kubectl
+ 1. Install kubectl on your workstation:
+    [Install and Set Up kubectl](https://kubernetes.io/docs/tasks/tools/install-kubectl/)
+ 2. Add a route to the internal IP of the master node (if needed):
+ ```
+ sudo route add [master-internal-ip] gw [router-ip]
+ ```
+ or
+ ```
+ sudo route add -net [internal-subnet]/24 gw [router-ip]
+ ```
+ 3. List the Kubernetes certs&keys:
+ ```
+ ssh [os-user]@[master-ip] sudo ls /etc/kubernetes/ssl/
+ ```
+ 4. Get the admin's certs&key:
+ ```
+ ssh [os-user]@[master-ip] sudo cat /etc/kubernetes/ssl/admin-[cluster_name]-k8s-master-1-key.pem > admin-key.pem
+ ssh [os-user]@[master-ip] sudo cat /etc/kubernetes/ssl/admin-[cluster_name]-k8s-master-1.pem > admin.pem
+ ssh [os-user]@[master-ip] sudo cat /etc/kubernetes/ssl/ca.pem > ca.pem
+ ```
+ 5. Edit the OpenStack Neutron master's Security Group to allow TCP connections to port 6443
+ 6. Configure kubectl:
+ ```
+ kubectl config set-cluster default-cluster --server=https://[master-internal-ip]:6443 \
+     --certificate-authority=ca.pem
+
+ kubectl config set-credentials default-admin \
+     --certificate-authority=ca.pem \
+     --client-key=admin-key.pem \
+     --client-certificate=admin.pem
+
+ kubectl config set-context default-system --cluster=default-cluster --user=default-admin
+ kubectl config use-context default-system
+ ```
+ 7. Check it:
+ ```
+ kubectl version
+ ```
+
+ # What's next
+ [Start Hello Kubernetes Service](https://kubernetes.io/docs/tasks/access-application-cluster/service-access-application-cluster/)
+
  # clean up:

  ```
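Editor's note: the `route add` commands in the new section use the legacy net-tools syntax; on hosts that ship only iproute2, an equivalent sketch (same bracketed placeholders) would be:

```sh
# Host route to a single master, or a subnet route via the Neutron router:
sudo ip route add [master-internal-ip]/32 via [router-ip]
sudo ip route add [internal-subnet]/24 via [router-ip]
```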
docs/aws.md

@@ -5,6 +5,10 @@ To deploy kubespray on [AWS](https://aws.amazon.com/) uncomment the `cloud_provi

  Prior to creating your instances, you **must** ensure that you have created IAM roles and policies for both "kubernetes-master" and "kubernetes-node". You can find the IAM policies [here](https://github.com/kubernetes-incubator/kubespray/tree/master/contrib/aws_iam/). See the [IAM Documentation](https://aws.amazon.com/documentation/iam/) if guidance is needed on how to set these up. When you bring your instances online, associate them with the respective IAM role. Nodes that are only to be used for Etcd do not need a role.

+ You would also need to tag the resources in your VPC accordingly for the aws provider to utilize them. Tag the subnets and all instances that kubernetes will be run on with the key `kubernetes.io/cluster/$cluster_name` (`$cluster_name` must be a unique identifier for the cluster). Tag the subnets that must be targeted by external ELBs with the key `kubernetes.io/role/elb` and internal ELBs with the key `kubernetes.io/role/internal-elb`.
+
+ Make sure your VPC has both DNS Hostnames support and Private DNS enabled.
+
  The next step is to make sure the hostnames in your `inventory` file are identical to your internal hostnames in AWS. This may look something like `ip-111-222-333-444.us-west-2.compute.internal`. You can then specify how Ansible connects to these instances with `ansible_ssh_host` and `ansible_ssh_user`.

  You can now create your cluster!
docs/ha-mode.md

@@ -12,7 +12,7 @@ Etcd
  ----

  The `etcd_access_endpoint` fact provides an access pattern for clients. And the
- `etcd_multiaccess` (defaults to `True`) group var controlls that behavior.
+ `etcd_multiaccess` (defaults to `True`) group var controls that behavior.
  It makes deployed components to access the etcd cluster members
  directly: `http://ip1:2379, http://ip2:2379,...`. This mode assumes the clients
  do a loadbalancing and handle HA for connections.

@@ -28,9 +28,9 @@ is less efficient than a dedicated load balancer because it creates extra
  health checks on the Kubernetes apiserver, but is more practical for scenarios
  where an external LB or virtual IP management is inconvenient. This option is
  configured by the variable `loadbalancer_apiserver_localhost` (defaults to `True`).
- You may also define the port the local internal loadbalancer users by changing,
+ You may also define the port the local internal loadbalancer uses by changing
  `nginx_kube_apiserver_port`. This defaults to the value of `kube_apiserver_port`.
- It is also import to note that Kubespray will only configure kubelet and kube-proxy
+ It is also important to note that Kubespray will only configure kubelet and kube-proxy
  on non-master nodes to use the local internal loadbalancer.

  If you choose to NOT use the local internal loadbalancer, you will need to configure
docs/vsphere.md (new file, 61 lines)

@@ -0,0 +1,61 @@
+ # vSphere cloud provider
+
+ Kubespray can be deployed with vSphere as the cloud provider. This feature supports:
+ - Volumes
+ - Persistent Volumes
+ - Storage Classes and provisioning of volumes
+ - vSphere Storage Policy Based Management for Containers orchestrated by Kubernetes
+
+ ## Prerequisites
+
+ You first need to configure your vSphere environment by following the [official documentation](https://kubernetes.io/docs/getting-started-guides/vsphere/#vsphere-cloud-provider).
+
+ After this step you should have:
+ - UUID activated for each VM where Kubernetes will be deployed
+ - A vSphere account with the required privileges
+
+ ## Kubespray configuration
+
+ First you must define the cloud provider in `inventory/group_vars/all.yml` and set it to `vsphere`.
+ ```yml
+ cloud_provider: vsphere
+ ```
+
+ Then, in the same file, you need to declare your vCenter credentials following the description below.
+
+ | Variable                     | Required | Type    | Choices                    | Default | Comment |
+ |------------------------------|----------|---------|----------------------------|---------|---------|
+ | vsphere_vcenter_ip           | TRUE     | string  |                            |         | IP/URL of the vCenter |
+ | vsphere_vcenter_port         | TRUE     | integer |                            |         | Port of the vCenter API. Commonly 443 |
+ | vsphere_insecure             | TRUE     | integer | 1, 0                       |         | Set to 1 if the host above uses a self-signed cert |
+ | vsphere_user                 | TRUE     | string  |                            |         | User name for vCenter with required privileges |
+ | vsphere_password             | TRUE     | string  |                            |         | Password for vCenter |
+ | vsphere_datacenter           | TRUE     | string  |                            |         | Datacenter name to use |
+ | vsphere_datastore            | TRUE     | string  |                            |         | Datastore name to use |
+ | vsphere_working_dir          | TRUE     | string  |                            |         | Working directory from the view "VMs and template" in the vCenter where VMs are placed |
+ | vsphere_scsi_controller_type | TRUE     | string  | buslogic, pvscsi, parallel | pvscsi  | SCSI controller name. Commonly "pvscsi". |
+ | vsphere_vm_uuid              | FALSE    | string  |                            |         | VM instance UUID of the virtual machine that hosts the K8s master. Can be retrieved from the instanceUuid property in VmConfigInfo, as vc.uuid in the VMX file, or in `/sys/class/dmi/id/product_serial` |
+ | vsphere_public_network       | FALSE    | string  |                            | Blank   | Name of the network the VMs are joined to |
+
+ Example configuration:
+ ```yml
+ vsphere_vcenter_ip: "myvcenter.domain.com"
+ vsphere_vcenter_port: 443
+ vsphere_insecure: 1
+ vsphere_user: "k8s@vsphere.local"
+ vsphere_password: "K8s_admin"
+ vsphere_datacenter: "DATACENTER_name"
+ vsphere_datastore: "DATASTORE_name"
+ vsphere_working_dir: "Docker_hosts"
+ vsphere_scsi_controller_type: "pvscsi"
+ ```
+
+ ## Deployment
+
+ Once the configuration is set, you can execute the playbook again to apply the new configuration:
+ ```
+ cd kubespray
+ ansible-playbook -i inventory/inventory.cfg -b -v cluster.yml
+ ```
+
+ You'll find some useful examples [here](https://github.com/kubernetes/kubernetes/tree/master/examples/volumes/vsphere) to test your configuration.
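For the optional `vsphere_vm_uuid` above, the table points at `/sys/class/dmi/id/product_serial`; a sketch of reading it from inside the guest:

```sh
# Requires root; prints the VM instance UUID the vSphere cloud provider expects.
sudo cat /sys/class/dmi/id/product_serial
```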
(group vars)

@@ -98,3 +98,6 @@ bin_dir: /usr/local/bin
  ## Please specify true if you want to perform a kernel upgrade
  kernel_upgrade: false
+
+ ## Etcd auto compaction retention for mvcc key value store in hour
+ #etcd_compaction_retention: 0
(k8s-cluster group vars)

@@ -23,7 +23,7 @@ kube_users_dir: "{{ kube_config_dir }}/users"
  kube_api_anonymous_auth: false

  ## Change this to use another Kubernetes version, e.g. a current beta release
- kube_version: v1.6.4
+ kube_version: v1.6.7

  # Where the binaries will be downloaded.
  # Note: ensure that you've enough disk space (about 1G)

@@ -39,6 +39,7 @@ kube_cert_group: kube-cert
  kube_log_level: 2

  # Users to create for basic auth in Kubernetes API via HTTP
+ # Optionally add groups for user
  kube_api_pwd: "changeme"
  kube_users:
    kube:

@@ -47,6 +48,8 @@ kube_users:
    root:
      pass: "{{kube_api_pwd}}"
      role: admin
+     # groups:
+     #   - system:masters

@@ -71,6 +74,7 @@ kube_users:
  # Can also be set to 'cloud', which lets the cloud provider setup appropriate routing
  kube_network_plugin: calico
+
  # Enable kubernetes network policies
  enable_network_policy: false

@@ -156,3 +160,8 @@ helm_enabled: false
  # -----BEGIN RSA PRIVATE KEY-----
  # ...
  # -----END RSA PRIVATE KEY-----
+
+ # dnsmasq
+ # dnsmasq_upstream_dns_servers:
+ #  - /resolvethiszone.with/10.0.4.250
+ #  - 8.8.8.8
(kube module documentation)

@@ -66,7 +66,7 @@ options:
  description:
    - present handles checking existence or creating if definition file provided,
      absent handles deleting resource(s) based on other options,
-     latest handles creating ore updating based on existence,
+     latest handles creating or updating based on existence,
      reloaded handles updating resource(s) definition using definition file,
      stopped handles stopping resource(s) based on other options.
  requirements:
(dnsmasq defaults)

@@ -30,3 +30,6 @@ dns_memory_requests: 50Mi
  # Autoscaler parameters
  dnsmasq_nodes_per_replica: 10
  dnsmasq_min_replicas: 1
+
+ # Custom name servers
+ dnsmasq_upstream_dns_servers: []
(dnsmasq config template)

@@ -11,6 +11,11 @@ server=/{{ dns_domain }}/{{ skydns_server }}
  local=/{{ bogus_domains }}

  #Set upstream dns servers
+ {% if dnsmasq_upstream_dns_servers|length > 0 %}
+ {% for srv in dnsmasq_upstream_dns_servers %}
+ server={{ srv }}
+ {% endfor %}
+ {% endif %}
  {% if system_and_upstream_dns_servers|length > 0 %}
  {% for srv in system_and_upstream_dns_servers %}
  server={{ srv }}
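With the new `dnsmasq_upstream_dns_servers` default of `[]` (two hunks up) the added block renders nothing; with a populated list it emits one dnsmasq `server=` directive per entry. A sketch of checking the rendered config on a node, where the drop-in path is an assumption, not taken from this diff:

```sh
# For dnsmasq_upstream_dns_servers: [8.8.8.8, 8.8.4.4] (hypothetical values)
# the rendered file gains:  server=8.8.8.8  and  server=8.8.4.4
grep '^server=' /etc/dnsmasq.d/01-kube-dns.conf  # path is an assumption
```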
(download role defaults)

@@ -18,7 +18,7 @@ download_localhost: False
  download_always_pull: False

  # Versions
- kube_version: v1.6.4
+ kube_version: v1.6.7
  etcd_version: v3.0.17
  #TODO(mattymo): Move calico versions to roles/network_plugins/calico/defaults
  # after migration to container download
(etcd defaults)

@@ -22,3 +22,5 @@ etcd_memory_limit: 512M
  #etcd_cpu_limit: 300m

  etcd_node_cert_hosts: "{{ groups['k8s-cluster'] | union(groups.get('calico-rr', [])) }}"
+
+ etcd_compaction_retention: "0"
(etcd rkt service template)

@@ -15,8 +15,8 @@ ExecStart=/usr/bin/rkt run \
  --mount=volume=etc-ssl-certs,target=/etc/ssl/certs \
  --volume=etcd-cert-dir,kind=host,source={{ etcd_cert_dir }},readOnly=true \
  --mount=volume=etcd-cert-dir,target={{ etcd_cert_dir }} \
- --volume=var-lib-etcd,kind=host,source={{ etcd_data_dir }},readOnly=false \
- --mount=volume=var-lib-etcd,target=/var/lib/etcd \
+ --volume=etcd-data-dir,kind=host,source={{ etcd_data_dir }},readOnly=false \
+ --mount=volume=etcd-data-dir,target={{ etcd_data_dir }} \
  --set-env-file=/etc/etcd.env \
  --stage1-from-dir=stage1-fly.aci \
  {{ etcd_image_repo }}:{{ etcd_image_tag }} \
(etcd environment file template)

@@ -12,6 +12,7 @@ ETCD_LISTEN_PEER_URLS=https://{{ etcd_address }}:2380
  ETCD_NAME={{ etcd_member_name }}
  ETCD_PROXY=off
  ETCD_INITIAL_CLUSTER={{ etcd_peer_addresses }}
+ ETCD_AUTO_COMPACTION_RETENTION={{ etcd_compaction_retention }}

  # TLS settings
  ETCD_TRUSTED_CA_FILE={{ etcd_cert_dir }}/ca.pem
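`ETCD_AUTO_COMPACTION_RETENTION` maps to etcd's `--auto-compaction-retention` flag: the `"0"` default wired in above disables automatic compaction, while a positive value keeps that many hours of MVCC history. The flag form, for reference:

```sh
# Same setting without the env file; retain one hour of key-value history,
# 0 disables automatic compaction entirely:
etcd --auto-compaction-retention=1
```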
(etcd docker run template)

@@ -5,7 +5,7 @@
  --net=host \
  -v /etc/ssl/certs:/etc/ssl/certs:ro \
  -v {{ etcd_cert_dir }}:{{ etcd_cert_dir }}:ro \
- -v {{ etcd_data_dir }}:/var/lib/etcd:rw \
+ -v {{ etcd_data_dir }}:{{ etcd_data_dir }}:rw \
  {% if etcd_memory_limit is defined %}
  --memory={{ etcd_memory_limit|regex_replace('Mi', 'M') }} \
  {% endif %}
(kubedns autoscaler defaults)

@@ -6,7 +6,7 @@ kubednsautoscaler_version: 1.1.1
  dns_memory_limit: 170Mi
  dns_cpu_requests: 100m
  dns_memory_requests: 70Mi
- kubedns_min_replicas: 1
+ kubedns_min_replicas: 2
  kubedns_nodes_per_replica: 10

  # Images
(kubernetes-apps tasks)

@@ -1,7 +1,7 @@
  ---
  - name: Kubernetes Apps | Wait for kube-apiserver
    uri:
-     url: http://localhost:8080/healthz
+     url: http://localhost:{{ kube_apiserver_insecure_port }}/healthz
    register: result
    until: result.status == 200
    ignore_errors: "{{ ansible_check_mode }}"
(master tasks)

@@ -41,7 +41,7 @@

  - name: Master | wait for the apiserver to be running
    uri:
-     url: http://localhost:8080/healthz
+     url: http://localhost:{{ kube_apiserver_insecure_port }}/healthz
    register: result
    until: result.status == 200
    retries: 20
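Both wait tasks above now honor `kube_apiserver_insecure_port` instead of a hard-coded 8080. The same probe by hand, assuming the stock insecure port:

```sh
# A healthy apiserver answers with the bare string "ok":
curl -s http://localhost:8080/healthz
```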
(kube-apiserver manifest template)

@@ -92,7 +92,7 @@ spec:
  httpGet:
    host: 127.0.0.1
    path: /healthz
-   port: 8080
+   port: {{ kube_apiserver_insecure_port }}
  initialDelaySeconds: 30
  timeoutSeconds: 10
  volumeMounts:

@@ -124,4 +124,4 @@ spec:
  - hostPath:
      path: /etc/ssl/certs/ca-bundle.crt
    name: rhel-ca-bundle
  {% endif %}
(kubelet args template)

@@ -14,7 +14,11 @@ KUBELET_HOSTNAME="--hostname-override={{ ansible_hostname }}"
  --kube-reserved cpu={{ kubelet_cpu_limit }},memory={{ kubelet_memory_limit|regex_replace('Mi', 'M') }} \
  --node-status-update-frequency={{ kubelet_status_update_frequency }} \
  {% if kube_version | version_compare('v1.6', '>=') %}
- --enable-cri={{ kubelet_enable_cri }} --cgroups-per-qos={{ kubelet_cgroups_per_qos }} \
+ {# flag got removed with 1.7.0 #}
+ {% if kube_version | version_compare('v1.7', '<') %}
+ --enable-cri={{ kubelet_enable_cri }} \
+ {% endif %}
+ --cgroups-per-qos={{ kubelet_cgroups_per_qos }} \
  --enforce-node-allocatable='{{ kubelet_enforce_node_allocatable }}' {% endif %}{% endset %}

  {# DNS settings for kubelet #}
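The new guard keeps `--enable-cri` off kubelets at v1.7+, where (per the template comment) the flag was removed. A rough way to confirm on a node whether the running kubelet still knows the flag:

```sh
# Empty output on kubelet >= 1.7, where the flag no longer exists:
kubelet --help 2>&1 | grep -- '--enable-cri' || echo "flag not supported"
```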
(kubelet rkt service template)

@@ -30,7 +30,7 @@ ExecStart=/usr/bin/rkt run \
  --volume {{ dir | regex_replace('^/(.*)$', '\\1' ) | regex_replace('/', '-') }},kind=host,source={{ dir }},readOnly=true \
  {% endfor -%}
  --volume var-lib-docker,kind=host,source={{ docker_daemon_graph }},readOnly=false \
- --volume var-lib-kubelet,kind=host,source=/var/lib/kubelet,readOnly=false \
+ --volume var-lib-kubelet,kind=host,source=/var/lib/kubelet,readOnly=false,recursive=true \
  --volume var-log,kind=host,source=/var/log \
  {% if kube_network_plugin in ["calico", "weave", "canal"] %}
  --volume etc-cni,kind=host,source=/etc/cni,readOnly=true \
(cloud / preinstall defaults)

@@ -32,7 +32,7 @@ openstack_domain_name: "{{ lookup('env','OS_USER_DOMAIN_NAME') }}"

  # For the vsphere integration, kubelet will need credentials to access
  # vsphere apis
- # Documentation regarting these values can be found
+ # Documentation regarding these values can be found
  # https://github.com/kubernetes/kubernetes/blob/master/pkg/cloudprovider/providers/vsphere/vsphere.go#L105
  vsphere_vcenter_ip: "{{ lookup('env', 'VSPHERE_VCENTER') }}"
  vsphere_vcenter_port: "{{ lookup('env', 'VSPHERE_VCENTER_PORT') }}"

@@ -49,3 +49,6 @@ vsphere_public_network: "{{ lookup('env', 'VSPHERE_PUBLIC_NETWORK')|default('') }}"
  # Container Linux by CoreOS cloud init config file to define /etc/resolv.conf content
  # for hostnet pods and infra needs
  resolveconf_cloud_init_conf: /etc/resolveconf_cloud_init.conf
+
+ # All inventory hostnames will be written into each /etc/hosts file.
+ populate_inventory_to_hosts_file: true
(preinstall hosts tasks)

@@ -9,6 +9,7 @@
    create: yes
    backup: yes
    marker: "# Ansible inventory hosts {mark}"
+   when: populate_inventory_to_hosts_file

  - name: Hosts | populate kubernetes loadbalancer address into hosts file
    lineinfile:
(kubespray-defaults facts)

@@ -20,7 +20,7 @@

  - set_fact:
      kube_apiserver_endpoint: |-
-       {% if not is_kube_master and loadbalancer_apiserver_localhost|default(false) -%}
+       {% if not is_kube_master and loadbalancer_apiserver_localhost|default(true) -%}
        https://localhost:{{ nginx_kube_apiserver_port|default(kube_apiserver_port) }}
        {%- elif is_kube_master -%}
        http://127.0.0.1:{{ kube_apiserver_insecure_port }}
(secrets tasks)

@@ -27,12 +27,10 @@
    group: "{{ kube_cert_group }}"

  - name: Populate users for basic auth in API
-   lineinfile:
+   template:
+     src: known_users.csv.j2
      dest: "{{ kube_users_dir }}/known_users.csv"
-     create: yes
-     line: '{{ item.value.pass }},{{ item.key }},{{ item.value.role }}'
      backup: yes
-   with_dict: "{{ kube_users }}"
    when: inventory_hostname in "{{ groups['kube-master'] }}" and kube_basic_auth|default(true)
    notify: set secret_changed
roles/kubernetes/secrets/templates/known_users.csv.j2 (new file, 3 lines)

@@ -0,0 +1,3 @@
+ {% for user in kube_users %}
+ {{kube_users[user].pass}},{{user}},{{kube_users[user].role}}{% if kube_users[user].groups is defined %},{% set groups_csv = kube_users[user].groups|join(',') -%}"{{groups_csv}}"{% endif %}
+ {% endfor %}
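Rendered against the `kube_users` structure shown in the group vars hunk earlier, this template emits one `password,user,role` row per user, plus an optional quoted, comma-joined groups column. A sketch of inspecting the result on a master, assuming the default `kube_config_dir` of /etc/kubernetes:

```sh
# Expect rows like:  changeme,kube,admin  and  changeme,root,admin
sudo cat /etc/kubernetes/users/known_users.csv
```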
(kubespray-defaults defaults)

@@ -4,7 +4,7 @@ bootstrap_os: none
  kube_api_anonymous_auth: false

  ## Change this to use another Kubernetes version, e.g. a current beta release
- kube_version: v1.6.4
+ kube_version: v1.6.7

  # Directory where the binaries will be installed
  bin_dir: /usr/local/bin
(k8s-cluster group vars, DNS settings)

@@ -22,7 +22,7 @@ cluster_name: cluster.local
  # Subdomains of DNS domain to be resolved via /etc/resolv.conf for hostnet pods
  ndots: 2
  # Can be dnsmasq_kubedns, kubedns or none
- dns_mode: dnsmasq_kubedns
+ dns_mode: kubedns
  # Can be docker_dns, host_resolvconf or none
  resolvconf_mode: docker_dns
  # Deploy netchecker app to verify DNS resolve as an HTTP service
(calico defaults)

@@ -3,7 +3,8 @@
  nat_outgoing: true

  # Use IP-over-IP encapsulation across hosts
- ipip: false
+ ipip: true
+ ipip_mode: always # change to "cross-subnet" if you only want ipip encapsulation on traffic going across subnets

  # Set to true if you want your calico cni binaries to overwrite the
  # ones from hyperkube while leaving other cni plugins intact.
(calico tasks)

@@ -97,7 +97,7 @@
  shell: >
    echo '{
    "kind": "ipPool",
-   "spec": {"disabled": false, "ipip": {"enabled": {{ cloud_provider is defined or ipip }}},
+   "spec": {"disabled": false, "ipip": {"enabled": {{ ipip }}, "mode": "{{ ipip_mode }}"},
    "nat-outgoing": {{ nat_outgoing|default(false) and not peer_with_router|default(false) }}},
    "apiVersion": "v1",
    "metadata": {"cidr": "{{ kube_pods_subnet }}"}
(calico legacy tasks)

@@ -114,8 +114,7 @@
  run_once: true
  set_fact:
    ipip_arg: "--ipip"
- when: (legacy_calicoctl and
-        cloud_provider is defined or ipip)
+ when: (legacy_calicoctl and ipip )
  tags: facts

- name: Calico (old) | Define nat-outgoing pool argument
(reset role tasks)

@@ -83,6 +83,15 @@
  - /etc/dhcp/dhclient.d/zdnsupdate.sh
  - /etc/dhcp/dhclient-exit-hooks.d/zdnsupdate
  - "{{ bin_dir }}/kubelet"
+ - "{{ bin_dir }}/kubernetes-scripts"
+ - /run/flannel
+ - /etc/flannel
+ - /run/kubernetes
+ - /usr/local/share/ca-certificates/kube-ca.crt
+ - /usr/local/share/ca-certificates/etcd-ca.crt
+ - /etc/ssl/certs/kube-ca.pem
+ - /etc/ssl/certs/etcd-ca.pem
+ - /var/log/pods/
  tags: ['files']
(post-upgrade tasks)

@@ -3,4 +3,5 @@
  - name: Uncordon node
    command: "{{ bin_dir }}/kubectl uncordon {{ inventory_hostname }}"
    delegate_to: "{{ groups['kube-master'][0] }}"
-   when: needs_cordoning|default(false)
+   when: (needs_cordoning|default(false)) and ( {%- if inventory_hostname in groups['kube-node'] -%} true {%- else -%} false {%- endif -%} )
(upgrade defaults)

@@ -1,3 +1,3 @@
- drain_grace_period: 30
- drain_timeout: 40s
+ drain_grace_period: 90
+ drain_timeout: 120s
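These defaults parameterize the drain step of the upgrade flow; the longer grace period and timeout give pods more time to exit cleanly. Roughly the kubectl call they feed, with a hypothetical node name:

```sh
# --grace-period is seconds granted to each pod; --timeout bounds the whole drain.
kubectl drain node1 --grace-period=90 --timeout=120s --ignore-daemonsets
```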
(upgrade playbook)

@@ -67,6 +67,7 @@
  - { role: kubernetes/node, tags: node }
  - { role: kubernetes/master, tags: master }
  - { role: network_plugin, tags: network }
+ - { role: upgrade/post-upgrade, tags: post-upgrade }

  #Finally handle worker upgrades, based on given batch size
  - hosts: kube-node:!kube-master