From f8283d8c56b13fe86c7839fc7a79d45b204f1d35 Mon Sep 17 00:00:00 2001 From: Pablo Moreno Date: Thu, 2 Mar 2017 23:58:07 +0000 Subject: [PATCH 1/8] Restores working order of contrib/terraform/openstack, includes vault group and avoids group_vars/k8s-cluster.yml --- contrib/terraform/openstack/group_vars | 1 + contrib/terraform/openstack/kubespray.tf | 12 ++++++------ 2 files changed, 7 insertions(+), 6 deletions(-) create mode 120000 contrib/terraform/openstack/group_vars diff --git a/contrib/terraform/openstack/group_vars b/contrib/terraform/openstack/group_vars new file mode 120000 index 000000000..d64da8dc6 --- /dev/null +++ b/contrib/terraform/openstack/group_vars @@ -0,0 +1 @@ +../../../inventory/group_vars \ No newline at end of file diff --git a/contrib/terraform/openstack/kubespray.tf b/contrib/terraform/openstack/kubespray.tf index 42d529d64..0f31b3d16 100644 --- a/contrib/terraform/openstack/kubespray.tf +++ b/contrib/terraform/openstack/kubespray.tf @@ -68,7 +68,7 @@ resource "openstack_compute_instance_v2" "k8s_master" { floating_ip = "${element(openstack_networking_floatingip_v2.k8s_master.*.address, count.index)}" metadata = { ssh_user = "${var.ssh_user}" - kubespray_groups = "etcd,kube-master,kube-node,k8s-cluster" + kubespray_groups = "etcd,kube-master,kube-node,k8s-cluster,vault" } } @@ -87,10 +87,10 @@ resource "openstack_compute_instance_v2" "k8s_master_no_floating_ip" { "${openstack_compute_secgroup_v2.k8s.name}" ] metadata = { ssh_user = "${var.ssh_user}" - kubespray_groups = "etcd,kube-master,kube-node,k8s-cluster" + kubespray_groups = "etcd,kube-master,kube-node,k8s-cluster,vault,no-floating" } provisioner "local-exec" { - command = "sed s/USER/${var.ssh_user}/ contrib/terraform/openstack/ansible_bastion_template.txt | sed s/BASTION_ADDRESS/${element(openstack_networking_floatingip_v2.k8s_master.*.address, 0)}/ > contrib/terraform/openstack/group_vars/k8s-cluster.yml" + command = "sed s/USER/${var.ssh_user}/ 
contrib/terraform/openstack/ansible_bastion_template.txt | sed s/BASTION_ADDRESS/${element(openstack_networking_floatingip_v2.k8s_master.*.address, 0)}/ > contrib/terraform/openstack/group_vars/no-floating.yml" } } @@ -107,7 +107,7 @@ resource "openstack_compute_instance_v2" "k8s_node" { floating_ip = "${element(openstack_networking_floatingip_v2.k8s_node.*.address, count.index)}" metadata = { ssh_user = "${var.ssh_user}" - kubespray_groups = "kube-node,k8s-cluster" + kubespray_groups = "kube-node,k8s-cluster,vault" } } @@ -123,10 +123,10 @@ resource "openstack_compute_instance_v2" "k8s_node_no_floating_ip" { security_groups = ["${openstack_compute_secgroup_v2.k8s.name}" ] metadata = { ssh_user = "${var.ssh_user}" - kubespray_groups = "kube-node,k8s-cluster" + kubespray_groups = "kube-node,k8s-cluster,vault,no-floating" } provisioner "local-exec" { - command = "sed s/USER/${var.ssh_user}/ contrib/terraform/openstack/ansible_bastion_template.txt | sed s/BASTION_ADDRESS/${element(openstack_networking_floatingip_v2.k8s_master.*.address, 0)}/ > contrib/terraform/openstack/group_vars/k8s-cluster.yml" + command = "sed s/USER/${var.ssh_user}/ contrib/terraform/openstack/ansible_bastion_template.txt | sed s/BASTION_ADDRESS/${element(openstack_networking_floatingip_v2.k8s_master.*.address, 0)}/ > contrib/terraform/openstack/group_vars/no-floating.yml" } } From bb66bb19e83c0dcfd2d0bde1851d5d2a976290ae Mon Sep 17 00:00:00 2001 From: Matthew Mosesohn Date: Tue, 14 Mar 2017 17:23:29 +0300 Subject: [PATCH 2/8] Fix etcd idempotency --- roles/etcd/tasks/check_certs.yml | 2 +- roles/etcd/tasks/gen_certs_script.yml | 18 ++++++++++-------- 2 files changed, 11 insertions(+), 9 deletions(-) diff --git a/roles/etcd/tasks/check_certs.yml b/roles/etcd/tasks/check_certs.yml index 9bb32f162..139254734 100644 --- a/roles/etcd/tasks/check_certs.yml +++ b/roles/etcd/tasks/check_certs.yml @@ -61,7 +61,7 @@ {% if gen_node_certs[inventory_hostname] or (not 
etcdcert_node.results[0].stat.exists|default(False)) or (not etcdcert_node.results[1].stat.exists|default(False)) or - (etcdcert_node.results[1].stat.checksum|default('') != etcdcert_master.files|selectattr("path", "equalto", etcdcert_node.results[1].stat.path)|first|map(attribute="checksum")|default('')) -%} + (etcdcert_node.results[1].stat.checksum|default('') != etcdcert_master.files|selectattr("path", "equalto", etcdcert_node.results[1].stat.path)|map(attribute="checksum")|first|default('')) -%} {%- set _ = certs.update({'sync': True}) -%} {% endif %} {{ certs.sync }} diff --git a/roles/etcd/tasks/gen_certs_script.yml b/roles/etcd/tasks/gen_certs_script.yml index 06d86257c..ac6f906cf 100644 --- a/roles/etcd/tasks/gen_certs_script.yml +++ b/roles/etcd/tasks/gen_certs_script.yml @@ -4,7 +4,8 @@ path: "{{ etcd_cert_dir }}" group: "{{ etcd_cert_group }}" state: directory - owner: root + owner: kube + mode: 0700 recurse: yes - name: "Gen_certs | create etcd script dir (on {{groups['etcd'][0]}})" @@ -12,6 +13,7 @@ path: "{{ etcd_script_dir }}" state: directory owner: root + mode: 0700 run_once: yes delegate_to: "{{groups['etcd'][0]}}" @@ -20,8 +22,9 @@ path: "{{ etcd_cert_dir }}" group: "{{ etcd_cert_group }}" state: directory - owner: root + owner: kube recurse: yes + mode: 0700 run_once: yes delegate_to: "{{groups['etcd'][0]}}" @@ -42,6 +45,7 @@ delegate_to: "{{groups['etcd'][0]}}" when: gen_certs|default(false) + - name: Gen_certs | run cert generation script command: "bash -x {{ etcd_script_dir }}/make-ssl-etcd.sh -f {{ etcd_config_dir }}/openssl.conf -d {{ etcd_cert_dir }}" environment: @@ -114,7 +118,9 @@ - name: Gen_certs | Prepare tempfile for unpacking certs shell: mktemp /tmp/certsXXXXX.tar.gz register: cert_tempfile - + when: inventory_hostname in groups['etcd'] and sync_certs|default(false) and + inventory_hostname != groups['etcd'][0] + - name: Gen_certs | Write master certs to tempfile copy: content: "{{etcd_master_cert_data.stdout}}" @@ -154,13 +160,9 
@@ group: "{{ etcd_cert_group }}" state: directory owner: kube + mode: "u=rwX,g-rwx,o-rwx" recurse: yes -- name: Gen_certs | set permissions on keys - shell: chmod 0600 {{ etcd_cert_dir}}/*key.pem - when: inventory_hostname in groups['etcd'] - changed_when: false - - name: Gen_certs | target ca-certificate store file set_fact: ca_cert_path: |- From 25b366dd989a9769bbc49d64510a685a7da2f652 Mon Sep 17 00:00:00 2001 From: Matthew Mosesohn Date: Mon, 6 Mar 2017 13:22:13 +0300 Subject: [PATCH 3/8] Migrate k8s data to etcd3 api store Default backend is now etcd3 (was etcd2). The migration process consists of the following steps: * check if migration is necessary * stop etcd on first etcd server * run migration script * start etcd on first etcd server * stop kube-apiserver until configuration is updated * update kube-apiserver * purge old etcdv2 data --- roles/kubernetes/master/defaults/main.yml | 4 +- roles/kubernetes/master/tasks/main.yml | 4 ++ .../kubernetes/master/tasks/post-upgrade.yml | 6 +++ roles/kubernetes/master/tasks/pre-upgrade.yml | 53 +++++++++++++++++-- 4 files changed, 62 insertions(+), 5 deletions(-) create mode 100644 roles/kubernetes/master/tasks/post-upgrade.yml diff --git a/roles/kubernetes/master/defaults/main.yml b/roles/kubernetes/master/defaults/main.yml index 527b168b9..659dcc847 100644 --- a/roles/kubernetes/master/defaults/main.yml +++ b/roles/kubernetes/master/defaults/main.yml @@ -13,6 +13,9 @@ kube_apiserver_node_port_range: "30000-32767" etcd_config_dir: /etc/ssl/etcd etcd_cert_dir: "{{ etcd_config_dir }}/ssl" +# ETCD backend for k8s data +kube_apiserver_storage_backend: etcd3 + # Limits for kube components kube_controller_memory_limit: 512M kube_controller_cpu_limit: 250m @@ -29,7 +32,6 @@ kube_apiserver_memory_limit: 2000M kube_apiserver_cpu_limit: 800m kube_apiserver_memory_requests: 256M kube_apiserver_cpu_requests: 300m -kube_apiserver_storage_backend: etcd2 ## Variables for OpenID Connect Configuration 
https://kubernetes.io/docs/admin/authentication/ ## To use OpenID you have to deploy additional an OpenID Provider (e.g Dex, Keycloak, ...) diff --git a/roles/kubernetes/master/tasks/main.yml b/roles/kubernetes/master/tasks/main.yml index 67a64d4a6..baf3b5c7c 100644 --- a/roles/kubernetes/master/tasks/main.yml +++ b/roles/kubernetes/master/tasks/main.yml @@ -70,3 +70,7 @@ dest: "{{ kube_manifest_dir }}/kube-scheduler.manifest" notify: Master | wait for kube-scheduler tags: kube-scheduler + +- include: post-upgrade.yml + tags: k8s-post-upgrade + diff --git a/roles/kubernetes/master/tasks/post-upgrade.yml b/roles/kubernetes/master/tasks/post-upgrade.yml new file mode 100644 index 000000000..07fc57b96 --- /dev/null +++ b/roles/kubernetes/master/tasks/post-upgrade.yml @@ -0,0 +1,6 @@ +--- +- name: "Post-upgrade | etcd3 upgrade | purge etcd2 k8s data" + command: "{{ bin_dir }}/etcdctl --endpoints={{ etcd_access_addresses }} rm -r /registry" + environment: + ETCDCTL_API: 2 + when: kube_apiserver_storage_backend == "etcd3" and needs_etcd_migration|bool|default(false) diff --git a/roles/kubernetes/master/tasks/pre-upgrade.yml b/roles/kubernetes/master/tasks/pre-upgrade.yml index 1bb0c0344..244c8b13e 100644 --- a/roles/kubernetes/master/tasks/pre-upgrade.yml +++ b/roles/kubernetes/master/tasks/pre-upgrade.yml @@ -32,19 +32,64 @@ stat: path: /etc/kubernetes/manifests/kube-apiserver.manifest register: kube_apiserver_manifest - when: secret_changed|default(false) or etcd_secret_changed|default(false) -- name: "Pre-upgrade | Write invalid image to kube-apiserver manifest if secrets were changed" +- name: "Pre-upgrade | etcd3 upgrade | see if old config exists" + command: "{{ bin_dir }}/etcdctl --peers={{ etcd_access_addresses }} ls /registry/minions" + environment: + ETCDCTL_API: 2 + register: old_data_exists + delegate_to: "{{groups['kube-master'][0]}}" + when: kube_apiserver_storage_backend == "etcd3" + failed_when: false + +- name: "Pre-upgrade | etcd3 upgrade | see if data 
was already migrated" + command: "{{ bin_dir }}/etcdctl --endpoints={{ etcd_access_addresses }} get --limit=1 --prefix=true /registry/minions" + environment: + ETCDCTL_API: 3 + register: data_migrated + delegate_to: "{{groups['etcd'][0]}}" + when: kube_apiserver_storage_backend == "etcd3" + failed_when: false + +- name: "Pre-upgrade | etcd3 upgrade | set needs_etcd_migration" + set_fact: + needs_etcd_migration: "{{ kube_apiserver_storage_backend == 'etcd3' and data_migrated.stdout_lines|length == 0 and old_data_exists.rc == 0 }}" + +- name: "Pre-upgrade | Write invalid image to kube-apiserver manifest if necessary" replace: dest: /etc/kubernetes/manifests/kube-apiserver.manifest regexp: '(\s+)image:\s+.*?$' replace: '\1image: kill.apiserver.using.fake.image.in:manifest' register: kube_apiserver_manifest_replaced - when: (secret_changed|default(false) or etcd_secret_changed|default(false)) and kube_apiserver_manifest.stat.exists + when: (secret_changed|default(false) or etcd_secret_changed|default(false) or needs_etcd_migration|bool) and kube_apiserver_manifest.stat.exists - name: "Pre-upgrade | Pause while waiting for kubelet to delete kube-apiserver pod" pause: seconds: 20 - when: (secret_changed|default(false) or etcd_secret_changed|default(false)) and kube_apiserver_manifest.stat.exists + when: kube_apiserver_manifest_replaced.changed tags: kube-apiserver +- name: "Pre-upgrade | etcd3 upgrade | stop etcd" + service: + name: etcd + state: stopped + delegate_to: "{{item}}" + with_items: "{{groups['etcd']}}" + when: needs_etcd_migration|bool + +- name: "Pre-upgrade | etcd3 upgrade | migrate data" + command: "{{ bin_dir }}/etcdctl migrate --data-dir=\"{{ etcd_data_dir }}\" --wal-dir=\"{{ etcd_data_dir }}/member/wal\"" + environment: + ETCDCTL_API: 3 + delegate_to: "{{item}}" + with_items: "{{groups['etcd']}}" + register: etcd_migrated + when: needs_etcd_migration|bool + +- name: "Pre-upgrade | etcd3 upgrade | start etcd" + service: + name: etcd + state: started + 
delegate_to: "{{item}}" + with_items: "{{groups['etcd']}}" + when: needs_etcd_migration|bool From ea1f072c7e47a526ab28a05b0787be503bc78fb8 Mon Sep 17 00:00:00 2001 From: Vincent Schwarzer Date: Mon, 27 Feb 2017 14:15:50 +0100 Subject: [PATCH 4/8] Granular authentication Control It is now possible to deactivate selected authentication methods (basic auth, token auth) inside the cluster by adding or removing the required arguments to the Kube API Server and generating the secrets accordingly. The x509 authentication is currently not optional because disabling it would affect the kubectl clients deployed on the master nodes. --- inventory/group_vars/k8s-cluster.yml | 10 ++++++++-- roles/kubernetes/master/defaults/main.yml | 8 +++++++- .../templates/manifests/kube-apiserver.manifest.j2 | 4 ++++ roles/kubernetes/secrets/tasks/check-tokens.yml | 2 +- roles/kubernetes/secrets/tasks/main.yml | 2 +- 5 files changed, 21 insertions(+), 5 deletions(-) diff --git a/inventory/group_vars/k8s-cluster.yml b/inventory/group_vars/k8s-cluster.yml index 73721d03b..dce804ea9 100644 --- a/inventory/group_vars/k8s-cluster.yml +++ b/inventory/group_vars/k8s-cluster.yml @@ -58,9 +58,16 @@ kube_users: role: admin + +## It is possible to activate / deactivate selected authentication methods (basic auth, static token auth) +#kube_oidc_auth: false +#kube_basic_auth: false +#kube_token_auth: false + + ## Variables for OpenID Connect Configuration https://kubernetes.io/docs/admin/authentication/ ## To use OpenID you have to deploy additional an OpenID Provider (e.g Dex, Keycloak, ...) -# kube_oidc_auth: false + # kube_oidc_url: https:// ... 
# kube_oidc_client_id: kubernetes ## Optional settings for OIDC @@ -69,7 +76,6 @@ kube_users: # kube_oidc_groups_claim: groups - # Choose network plugin (calico, weave or flannel) # Can also be set to 'cloud', which lets the cloud provider setup appropriate routing kube_network_plugin: calico diff --git a/roles/kubernetes/master/defaults/main.yml b/roles/kubernetes/master/defaults/main.yml index 527b168b9..f719a1138 100644 --- a/roles/kubernetes/master/defaults/main.yml +++ b/roles/kubernetes/master/defaults/main.yml @@ -31,9 +31,15 @@ kube_apiserver_memory_requests: 256M kube_apiserver_cpu_requests: 300m kube_apiserver_storage_backend: etcd2 + +## Enable/Disable Kube API Server Authentication Methods +kube_basic_auth: true +kube_token_auth: true +kube_oidc_auth: false + ## Variables for OpenID Connect Configuration https://kubernetes.io/docs/admin/authentication/ ## To use OpenID you have to deploy additional an OpenID Provider (e.g Dex, Keycloak, ...) -kube_oidc_auth: false + #kube_oidc_url: https:// ... 
# kube_oidc_client_id: kubernetes ## Optional settings for OIDC diff --git a/roles/kubernetes/master/templates/manifests/kube-apiserver.manifest.j2 b/roles/kubernetes/master/templates/manifests/kube-apiserver.manifest.j2 index 96a0c738a..65a30929b 100644 --- a/roles/kubernetes/master/templates/manifests/kube-apiserver.manifest.j2 +++ b/roles/kubernetes/master/templates/manifests/kube-apiserver.manifest.j2 @@ -34,10 +34,14 @@ spec: - --service-cluster-ip-range={{ kube_service_addresses }} - --service-node-port-range={{ kube_apiserver_node_port_range }} - --client-ca-file={{ kube_cert_dir }}/ca.pem +{% if kube_basic_auth|default(true) %} - --basic-auth-file={{ kube_users_dir }}/known_users.csv +{% endif %} - --tls-cert-file={{ kube_cert_dir }}/apiserver.pem - --tls-private-key-file={{ kube_cert_dir }}/apiserver-key.pem +{% if kube_token_auth|default(true) %} - --token-auth-file={{ kube_token_dir }}/known_tokens.csv +{% endif %} - --service-account-key-file={{ kube_cert_dir }}/apiserver-key.pem {% if kube_oidc_auth|default(false) and kube_oidc_url is defined and kube_oidc_client_id is defined %} - --oidc-issuer-url={{ kube_oidc_url }} diff --git a/roles/kubernetes/secrets/tasks/check-tokens.yml b/roles/kubernetes/secrets/tasks/check-tokens.yml index 14cfbb124..16c3e4357 100644 --- a/roles/kubernetes/secrets/tasks/check-tokens.yml +++ b/roles/kubernetes/secrets/tasks/check-tokens.yml @@ -14,7 +14,7 @@ - name: "Check_tokens | Set 'sync_tokens' and 'gen_tokens' to true" set_fact: gen_tokens: true - when: not known_tokens_master.stat.exists + when: not known_tokens_master.stat.exists and kube_token_auth|default(true) run_once: true - name: "Check tokens | check if a cert already exists" diff --git a/roles/kubernetes/secrets/tasks/main.yml b/roles/kubernetes/secrets/tasks/main.yml index 6da147170..919ed0df7 100644 --- a/roles/kubernetes/secrets/tasks/main.yml +++ b/roles/kubernetes/secrets/tasks/main.yml @@ -33,7 +33,7 @@ line: '{{ item.value.pass }},{{ item.key }},{{ 
item.value.role }}' backup: yes with_dict: "{{ kube_users }}" - when: inventory_hostname in "{{ groups['kube-master'] }}" + when: inventory_hostname in "{{ groups['kube-master'] }}" and kube_basic_auth|default(true) notify: set secret_changed # From 97a7f1c4a520def1b5449bbcb2d95da307c54b9f Mon Sep 17 00:00:00 2001 From: Sergii Golovatiuk Date: Tue, 14 Mar 2017 17:26:42 +0100 Subject: [PATCH 5/8] Turn on iptables for flannel Closes: #1135 Closes: #1026 Signed-off-by: Sergii Golovatiuk --- inventory/group_vars/k8s-cluster.yml | 2 +- roles/docker/templates/docker-options.conf.j2 | 2 +- roles/kargo-defaults/defaults/main.yaml | 2 +- 3 files changed, 3 insertions(+), 3 deletions(-) diff --git a/inventory/group_vars/k8s-cluster.yml b/inventory/group_vars/k8s-cluster.yml index 50bbee230..02fad056d 100644 --- a/inventory/group_vars/k8s-cluster.yml +++ b/inventory/group_vars/k8s-cluster.yml @@ -102,7 +102,7 @@ docker_daemon_graph: "/var/lib/docker" ## This string should be exactly as you wish it to appear. 
## An obvious use case is allowing insecure-registry access ## to self hosted registries like so: -docker_options: "--insecure-registry={{ kube_service_addresses }} --graph={{ docker_daemon_graph }} --iptables=false" +docker_options: "--insecure-registry={{ kube_service_addresses }} --graph={{ docker_daemon_graph }}" docker_bin_dir: "/usr/bin" # Settings for containerized control plane (etcd/kubelet/secrets) diff --git a/roles/docker/templates/docker-options.conf.j2 b/roles/docker/templates/docker-options.conf.j2 index 012795898..0113bfc61 100644 --- a/roles/docker/templates/docker-options.conf.j2 +++ b/roles/docker/templates/docker-options.conf.j2 @@ -1,2 +1,2 @@ [Service] -Environment="DOCKER_OPTS={% if docker_options is defined %}{{ docker_options }}{% endif %}" \ No newline at end of file +Environment="DOCKER_OPTS={% if docker_options is defined %}{{ docker_options }}{% endif %} --iptables={% if kube_network_plugin == 'flannel' %}true{% else %}false{% endif %}" diff --git a/roles/kargo-defaults/defaults/main.yaml b/roles/kargo-defaults/defaults/main.yaml index a2ec34cb7..ecafb1682 100644 --- a/roles/kargo-defaults/defaults/main.yaml +++ b/roles/kargo-defaults/defaults/main.yaml @@ -101,7 +101,7 @@ docker_daemon_graph: "/var/lib/docker" ## This string should be exactly as you wish it to appear. 
## An obvious use case is allowing insecure-registry access ## to self hosted registries like so: -docker_options: "--insecure-registry={{ kube_service_addresses }} --graph={{ docker_daemon_graph }} --iptables=false" +docker_options: "--insecure-registry={{ kube_service_addresses }} --graph={{ docker_daemon_graph }}" # Settings for containerized control plane (etcd/kubelet/secrets) etcd_deployment_type: docker From 4287993811320efb124b347b5651a937a43c9673 Mon Sep 17 00:00:00 2001 From: Matthew Mosesohn Date: Tue, 14 Mar 2017 21:02:00 +0300 Subject: [PATCH 6/8] Make resolvconf preinstall idempotent --- .../preinstall/tasks/resolvconf.yml | 50 ++++++++++--------- 1 file changed, 26 insertions(+), 24 deletions(-) diff --git a/roles/kubernetes/preinstall/tasks/resolvconf.yml b/roles/kubernetes/preinstall/tasks/resolvconf.yml index 55edd0ca7..6369dfd9c 100644 --- a/roles/kubernetes/preinstall/tasks/resolvconf.yml +++ b/roles/kubernetes/preinstall/tasks/resolvconf.yml @@ -3,25 +3,16 @@ command: cp -f /etc/resolv.conf "{{ resolvconffile }}" when: ansible_os_family in ["CoreOS", "Container Linux by CoreOS"] -- name: Remove search/domain/nameserver options - lineinfile: - dest: "{{item[0]}}" - state: absent - regexp: "^{{ item[1] }}.*$" - backup: yes - follow: yes - with_nested: - - "{{ [resolvconffile] + [base|default('')] + [head|default('')] }}" - - [ 'search ', 'nameserver ', 'domain ', 'options ' ] - notify: Preinstall | restart network - -- name: Add domain/search/nameservers to resolv.conf +- name: Add domain/search/nameservers/options to resolv.conf blockinfile: dest: "{{resolvconffile}}" block: |- {% for item in [domainentry] + [searchentries] + nameserverentries.split(',') -%} {{ item }} {% endfor %} + options ndots:{{ ndots }} + options timeout:2 + options attempts:2 state: present insertbefore: BOF create: yes @@ -30,21 +21,32 @@ marker: "# Ansible entries {mark}" notify: Preinstall | restart network -- name: Add options to resolv.conf - lineinfile: - line: 
options {{ item }} - dest: "{{resolvconffile}}" - state: present - regexp: "^options.*{{ item }}$" - insertafter: EOF +- name: Remove search/domain/nameserver options before block + replace: + dest: "{{item[0]}}" + regexp: '^{{ item[1] }}[^#]*(?=# Ansible entries BEGIN)' backup: yes follow: yes - with_items: - - ndots:{{ ndots }} - - timeout:2 - - attempts:2 + with_nested: + - "{{ [resolvconffile] + [base|default('')] + [head|default('')] }}" + - [ 'search ', 'nameserver ', 'domain ', 'options ' ] + when: item[0] != "" notify: Preinstall | restart network +- name: Remove search/domain/nameserver options after block + replace: + dest: "{{item[0]}}" + regexp: '(# Ansible entries END\n(?:(?!^{{ item[1] }}).*\n)*)(?:^{{ item[1] }}.*\n?)+' + replace: '\1' + backup: yes + follow: yes + with_nested: + - "{{ [resolvconffile] + [base|default('')] + [head|default('')] }}" + - [ 'search ', 'nameserver ', 'domain ', 'options ' ] + when: item[0] != "" + notify: Preinstall | restart network + + - name: get temporary resolveconf cloud init file content command: cat {{ resolvconffile }} register: cloud_config From eabea728c6b3ac172107149fab37f1e92250c5c1 Mon Sep 17 00:00:00 2001 From: Vincent Schwarzer Date: Wed, 15 Mar 2017 13:04:01 +0100 Subject: [PATCH 7/8] Fixed CoreOS Docu CoreOS docu was referencing outdated bootstrap playbook that is now part of kargo itself. 
--- docs/coreos.md | 8 -------- 1 file changed, 8 deletions(-) diff --git a/docs/coreos.md b/docs/coreos.md index e38369aef..7c9b2c8a6 100644 --- a/docs/coreos.md +++ b/docs/coreos.md @@ -13,12 +13,4 @@ Before running the cluster playbook you must satisfy the following requirements: * On each CoreOS nodes a writable directory **/opt/bin** (~400M disk space) -* Uncomment the variable **ansible\_python\_interpreter** in the file `inventory/group_vars/all.yml` - -* run the Python bootstrap playbook - -``` -ansible-playbook -u smana -e ansible_ssh_user=smana -b --become-user=root -i inventory/inventory.cfg coreos-bootstrap.yml -``` - Then you can proceed to [cluster deployment](#run-deployment) From 59f2934c532e326c3faa07854f86a0789c9e66ef Mon Sep 17 00:00:00 2001 From: Vincent Schwarzer Date: Wed, 15 Mar 2017 13:11:09 +0100 Subject: [PATCH 8/8] Added Jinja 2.8 to Docs Added Jinja 2.8 Requirements to docs and pip requirements file which is needed to run the current Ansible Playbooks. --- README.md | 1 + requirements.txt | 1 + 2 files changed, 2 insertions(+) diff --git a/README.md b/README.md index 963291bdf..9fee4ff32 100644 --- a/README.md +++ b/README.md @@ -69,6 +69,7 @@ Requirements * **Ansible v2.2 (or newer) and python-netaddr is installed on the machine that will run Ansible commands** +* **Jinja 2.8 (or newer) is required to run the Ansible Playbooks** * The target servers must have **access to the Internet** in order to pull docker images. * The target servers are configured to allow **IPv4 forwarding**. * **Your ssh key must be copied** to all the servers part of your inventory. diff --git a/requirements.txt b/requirements.txt index 6fd09e6c7..bf8e65e4a 100644 --- a/requirements.txt +++ b/requirements.txt @@ -1,2 +1,3 @@ ansible>=2.2.1 netaddr +Jinja2>=2.8