fix apply for netchecker upgrade (#1659)

* fix apply for netchecker upgrade and graceful upgrade

* Speed up daemonset upgrades. Make check wait for ds upgrades.
Matthew Mosesohn 2017-09-15 13:19:37 +01:00 committed by GitHub
parent 8d766a2ca9
commit b294db5aed
9 changed files with 21 additions and 21 deletions


@@ -10,6 +10,7 @@
   kube:
     name: "netchecker-server"
     namespace: "{{ netcheck_namespace }}"
+    filename: "{{ netchecker_server_manifest.stat.path }}"
     kubectl: "{{bin_dir}}/kubectl"
     resource: "deploy"
     state: latest
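With state: latest, kubespray's kube module drives a kubectl apply of the given file (the "fix apply" in the commit title), which is why the task needs an explicit filename. A sketch of the roughly equivalent raw invocation, assuming the module shells out to kubectl apply (hypothetical task; the real logic lives in library/kube.py):

- name: Apply the netchecker-server manifest (sketch)
  command: >-
    {{ bin_dir }}/kubectl apply
    -f {{ netchecker_server_manifest.stat.path }}
    --namespace={{ netcheck_namespace }}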


@@ -42,6 +42,5 @@ spec:
             memory: {{ netchecker_agent_memory_requests }}
   updateStrategy:
     rollingUpdate:
-      maxUnavailable: 1
+      maxUnavailable: 100%
     type: RollingUpdate
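The agents only generate probe traffic, so there is no availability to preserve during an upgrade; maxUnavailable: 100% lets the DaemonSet controller replace every agent pod in one pass instead of node by node. To inspect the batch replacement while it happens (a sketch; the app=netchecker-agent label is an assumption, not confirmed by this diff):

- name: Show netchecker-agent pods during the roll (sketch)
  shell: "{{ bin_dir }}/kubectl get pods --namespace={{ netcheck_namespace }} -l app=netchecker-agent -owide"
  register: agent_pods

- debug: msg="{{ agent_pods.stdout.split('\n') }}"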


@@ -46,5 +46,5 @@ spec:
             memory: {{ netchecker_agent_memory_requests }}
   updateStrategy:
     rollingUpdate:
-      maxUnavailable: 1
+      maxUnavailable: 100%
     type: RollingUpdate


@@ -13,22 +13,18 @@
     kube_apiserver_storage_backend: "etcd2"
   when: old_data_exists.rc == 0 and not force_etcd3|bool
 
-- name: "Pre-upgrade | Delete master manifests on all kube-masters"
+- name: "Pre-upgrade | Delete master manifests"
   file:
-    path: "/etc/kubernetes/manifests/{{item[1]}}.manifest"
+    path: "/etc/kubernetes/manifests/{{item}}.manifest"
     state: absent
-  delegate_to: "{{item[0]}}"
-  with_nested:
-    - "{{groups['kube-master']}}"
+  with_items:
     - ["kube-apiserver", "kube-controller-manager", "kube-scheduler"]
   register: kube_apiserver_manifest_replaced
   when: (secret_changed|default(false) or etcd_secret_changed|default(false))
 
-- name: "Pre-upgrade | Delete master containers forcefully on all kube-masters"
+- name: "Pre-upgrade | Delete master containers forcefully"
   shell: "docker ps -f name=k8s-{{item}}* -q | xargs --no-run-if-empty docker rm -f"
-  delegate_to: "{{item[0]}}"
-  with_nested:
-    - "{{groups['kube-master']}}"
+  with_items:
     - ["kube-apiserver", "kube-controller-manager", "kube-scheduler"]
   when: kube_apiserver_manifest_replaced.changed
   run_once: true
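The with_nested loop over kube-master hosts crossed with component names, plus delegate_to, collapses into a plain with_items: the role already executes against the masters, so only the component name is needed in {{ item }}. The two loop shapes side by side, as a sketch using debug in place of the real modules:

# before: item[0] = master host, item[1] = component name
- debug: msg="would delete {{ item[1] }}.manifest on {{ item[0] }}"
  with_nested:
    - "{{ groups['kube-master'] }}"
    - ["kube-apiserver", "kube-controller-manager", "kube-scheduler"]

# after: item = component name, no cross-host delegation
- debug: msg="would delete {{ item }}.manifest"
  with_items:
    - ["kube-apiserver", "kube-controller-manager", "kube-scheduler"]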


@@ -161,6 +161,6 @@ spec:
           path: "{{ calico_cert_dir }}"
   updateStrategy:
     rollingUpdate:
-      maxUnavailable: 1
+      maxUnavailable: {{ serial | default('20%') }}
     type: RollingUpdate
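Unlike the netchecker agents, calico-node (and the other network-plugin DaemonSets in the hunks below) carries pod networking for its host, so the upgrade batch follows the play's serial value instead of a blanket 100%, falling back to 20%. Rendered results, as a sketch:

# serial undefined (fresh deploy)      -> maxUnavailable: 20%
# serial: 1 (e.g. a cautious upgrade)  -> maxUnavailable: 1
updateStrategy:
  rollingUpdate:
    maxUnavailable: {{ serial | default('20%') }}
  type: RollingUpdate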


@@ -190,5 +190,5 @@ spec:
           readOnly: true
   updateStrategy:
     rollingUpdate:
-      maxUnavailable: 1
+      maxUnavailable: {{ serial | default('20%') }}
     type: RollingUpdate


@@ -121,5 +121,5 @@ spec:
           path: /opt/cni/bin
   updateStrategy:
     rollingUpdate:
-      maxUnavailable: 1
+      maxUnavailable: {{ serial | default('20%') }}
     type: RollingUpdate


@@ -156,6 +156,6 @@ items:
             path: /lib/modules
     updateStrategy:
       rollingUpdate:
-        maxUnavailable: 1
+        maxUnavailable: {{ serial | default('20%') }}
       type: RollingUpdate


@@ -12,16 +12,11 @@
     bin_dir: "/usr/local/bin"
   when: not ansible_os_family in ["CoreOS", "Container Linux by CoreOS"]
 
-- name: Check kubectl output
-  shell: "{{bin_dir}}/kubectl get pods --all-namespaces -owide"
-  register: get_pods
-
-- debug: msg="{{get_pods.stdout.split('\n')}}"
 
 - name: Get pod names
   shell: "{{bin_dir}}/kubectl get pods -o json"
   register: pods
-  until: '"ContainerCreating" not in pods.stdout'
+  until: '"ContainerCreating" not in pods.stdout and "Terminating" not in pods.stdout'
   retries: 60
   delay: 2
   no_log: true
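Adding "Terminating" to the until condition is what makes the check wait out the DaemonSet upgrades above: with maxUnavailable at 100%, every agent pod is deleted and recreated, and retries: 60 with delay: 2 bounds the wait at roughly two minutes. A more direct wait on the rollout itself could look like this (a sketch, assuming the DaemonSet name and a kubectl new enough for DaemonSet rollout tracking, 1.7+):

- name: Wait for the netchecker-agent DaemonSet rollout (sketch)
  command: "{{ bin_dir }}/kubectl rollout status ds/netchecker-agent --namespace={{ netcheck_namespace }}"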
@@ -30,11 +25,20 @@
   command: "{{bin_dir}}/kubectl get pods -o
     jsonpath='{range .items[?(.spec.hostNetwork)]}{.metadata.name} {.status.podIP} {.status.containerStatuses} {end}'"
   register: hostnet_pods
+  no_log: true
 
 - name: Get running pods
   command: "{{bin_dir}}/kubectl get pods -o
     jsonpath='{range .items[?(.status.phase==\"Running\")]}{.metadata.name} {.status.podIP} {.status.containerStatuses} {end}'"
   register: running_pods
+  no_log: true
+
+- name: Check kubectl output
+  shell: "{{bin_dir}}/kubectl get pods --all-namespaces -owide"
+  register: get_pods
+  no_log: true
+
+- debug: msg="{{get_pods.stdout.split('\n')}}"
 
 - set_fact:
     kube_pods_subnet: 10.233.64.0/18
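Both queries use jsonpath's range/end iteration with a filter expression: ?(.spec.hostNetwork) selects host-networked pods and ?(.status.phase=="Running") selects running ones, emitting name, pod IP, and container statuses for each match. A minimal standalone use of the same filter, as a sketch (hypothetical task, not part of this commit):

- name: List names of running pods, one per line (sketch)
  command: >-
    {{ bin_dir }}/kubectl get pods -o
    jsonpath='{range .items[?(.status.phase=="Running")]}{.metadata.name}{"\n"}{end}'
  register: running_pod_names

- debug: msg="{{ running_pod_names.stdout_lines }}"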