Remove rkt support (#4671)

Andreas Krüger 2019-04-29 10:14:20 +02:00 committed by Kubernetes Prow Robot
parent 741de6051c
commit 38af93b60c
32 changed files with 7 additions and 302 deletions

View file

@ -256,13 +256,6 @@ gce_coreos-kube-router:
   except: ['triggers']
   only: ['master', /^pr-.*$/]
-gce_ubuntu-rkt-sep:
-  stage: deploy-gce
-  <<: *gce
-  when: manual
-  except: ['triggers']
-  only: ['master', /^pr-.*$/]
 gce_ubuntu-kube-router-sep:
   stage: deploy-special
   <<: *gce

View file

@ -115,13 +115,6 @@ packet_opensuse-canal:
   except: ['triggers']
   only: ['master', /^pr-.*$/]
-packet_ubuntu-rkt-sep:
-  stage: deploy-part2
-  <<: *packet
-  when: manual
-  except: ['triggers']
-  only: ['master', /^pr-.*$/]
 packet_ubuntu-kube-router-sep:
   stage: deploy-special
   <<: *packet

View file

@ -111,7 +111,6 @@ Supported Components
 - [kubernetes](https://github.com/kubernetes/kubernetes) v1.14.1
 - [etcd](https://github.com/coreos/etcd) v3.2.26
 - [docker](https://www.docker.com/) v18.06 (see note)
-- [rkt](https://github.com/rkt/rkt) v1.21.0 (see Note 2)
 - [cri-o](http://cri-o.io/) v1.11.5 (experimental: see [CRI-O Note](docs/cri-o.md). Only on centos based OS)
 - Network Plugin
 - [calico](https://github.com/projectcalico/calico) v3.4.0
@ -131,11 +130,6 @@ Supported Components
 Note: The list of validated [docker versions](https://github.com/kubernetes/kubernetes/blob/master/CHANGELOG-1.13.md) was updated to 1.11.1, 1.12.1, 1.13.1, 17.03, 17.06, 17.09, 18.06. kubeadm now properly recognizes Docker 18.09.0 and newer, but still treats 18.06 as the default supported version. The kubelet might break on docker's non-standard version numbering (it no longer uses semantic versioning). To ensure auto-updates don't break your cluster, look into e.g. the yum versionlock plugin or apt pinning.
-Note 2: rkt support as docker alternative is limited to control plane (etcd and
-kubelet). Docker is still used for Kubernetes cluster workloads and network
-plugins' related OS services. Also note, only one of the supported network
-plugins can be deployed for a given single cluster.
 Requirements
 ------------
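
For the version-pinning advice in the note above, a minimal Ansible sketch (the package name `docker-ce`, the task names, and the presence of the yum versionlock plugin are assumptions, not part of this commit):

```yaml
# Hold the validated Docker package so automatic upgrades cannot replace it.
- name: Hold docker package on Debian-family hosts
  dpkg_selections:
    name: docker-ce          # assumed package name; adjust to the one installed
    selection: hold
  when: ansible_os_family == "Debian"

- name: Lock docker package version on RedHat-family hosts
  command: yum versionlock add docker-ce   # requires yum-plugin-versionlock
  when: ansible_os_family == "RedHat"
```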

View file

@ -1,7 +1,7 @@
 ---
 # Stop temporary Vault if it's running (can linger if playbook fails out)
 - name: stop vault-temp container
-  shell: docker stop {{ vault_temp_container_name }} || rkt stop {{ vault_temp_container_name }}
+  shell: docker stop {{ vault_temp_container_name }}
   failed_when: false
   register: vault_temp_stop
   changed_when: vault_temp_stop is succeeded

View file

@ -1,45 +0,0 @@
[Unit]
Description=hashicorp vault on rkt
Documentation=https://github.com/hashicorp/vault
Wants=network.target
[Service]
User=root
Restart=on-failure
RestartSec=10s
TimeoutStartSec=5
LimitNOFILE=40000
# Container has the following internal mount points:
# /vault/file/ # File backend storage location
# /vault/logs/ # Log files
ExecStartPre=-/usr/bin/rkt rm --uuid-file=/var/run/vault.uuid
ExecStart=/usr/bin/rkt run \
--insecure-options=image \
--volume hosts,kind=host,source=/etc/hosts,readOnly=true \
--mount volume=hosts,target=/etc/hosts \
--volume=volume-vault-file,kind=host,source=/var/lib/vault \
--volume=volume-vault-logs,kind=host,source={{ vault_log_dir }} \
--volume=vault-cert-dir,kind=host,source={{ vault_cert_dir }} \
--mount=volume=vault-cert-dir,target={{ vault_cert_dir }} \
--volume=vault-conf-dir,kind=host,source={{ vault_config_dir }} \
--mount=volume=vault-conf-dir,target={{ vault_config_dir }} \
--volume=vault-secrets-dir,kind=host,source={{ vault_secrets_dir }} \
--mount=volume=vault-secrets-dir,target={{ vault_secrets_dir }} \
--volume=vault-roles-dir,kind=host,source={{ vault_roles_dir }} \
--mount=volume=vault-roles-dir,target={{ vault_roles_dir }} \
--volume=etcd-cert-dir,kind=host,source={{ etcd_cert_dir }} \
--mount=volume=etcd-cert-dir,target={{ etcd_cert_dir }} \
docker://{{ vault_image_repo }}:{{ vault_image_tag }} \
--uuid-file-save=/var/run/vault.uuid \
--name={{ vault_container_name }} \
--net=host \
--caps-retain=CAP_IPC_LOCK \
--exec vault -- \
server \
--config={{ vault_config_dir }}/config.json
ExecStop=-/usr/bin/rkt stop --uuid-file=/var/run/vault.uuid
[Install]
WantedBy=multi-user.target

View file

@ -93,6 +93,6 @@ Potential Work
 - Change the Vault role to not run certain tasks when ``root_token`` and
   ``unseal_keys`` are not present. Alternatively, allow user input for these
   values when missing.
-- Add the ability to start temp Vault with Host, Rkt, or Docker
+- Add the ability to start temp Vault with Host or Docker
 - Add a dynamic way to change out the backend role creation during Bootstrap,
   so other services can be used (such as Consul)

View file

@ -70,7 +70,7 @@ The group variables to control main deployment options are located in the direct
 Optional variables are located in the `inventory/sample/group_vars/all.yml`.
 Mandatory variables that are common for at least one role (or a node group) can be found in the
 `inventory/sample/group_vars/k8s-cluster.yml`.
-There are also role vars for docker, rkt, kubernetes preinstall and master roles.
+There are also role vars for docker, kubernetes preinstall and master roles.
 According to the [ansible docs](http://docs.ansible.com/ansible/playbooks_variables.html#variable-precedence-where-should-i-put-a-variable),
 those cannot be overridden from the group vars. In order to override, one should use
 the `-e` runtime flags (most simple way) or other layers described in the docs.
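
To illustrate the override path described above, a minimal extra-vars sketch (the file name `override.yml` and the variable value are illustrative, not part of this commit):

```yaml
# override.yml -- applied with the highest precedence via:
#   ansible-playbook -i inventory/sample/hosts.ini cluster.yml -e @override.yml
# Role vars like this cannot be overridden from group_vars, but -e can override them.
docker_version: "18.06"
```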

View file

@ -2,7 +2,7 @@ Kubespray's roadmap
 =================
 ### Self deployment (pull-mode) [#320](https://github.com/kubespray/kubespray/issues/320)
-- the playbook would install and configure docker/rkt and the etcd cluster
+- the playbook would install and configure docker and the etcd cluster
 - the following data would be inserted into etcd: certs,tokens,users,inventory,group_vars.
 - a "kubespray" container would be deployed (kubespray-cli, ansible-playbook)
 - to be discussed, a way to provide the inventory

View file

@ -102,7 +102,7 @@ Stack](https://github.com/kubernetes-sigs/kubespray/blob/master/docs/dns-stack.m
 proxy. Note that no_proxy defaults to all internal cluster IPs and hostnames
 that correspond to each node.
 * *kubelet_deployment_type* - Controls which platform to deploy kubelet on.
-  Available options are ``host``, ``rkt``, and ``docker``. ``docker`` mode
+  Available options are ``host`` and ``docker``. ``docker`` mode
   is unlikely to work on newer releases. Starting with Kubernetes v1.7
   series, this now defaults to ``host``. Before v1.7, the default was Docker.
   This is because of cgroup [issues](https://github.com/kubernetes/kubernetes/issues/43704).
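
For reference, a minimal group_vars sketch of the variable described above (the value shown is the post-v1.7 default; illustrative only):

```yaml
# inventory/sample/group_vars/k8s-cluster.yml
# Run the kubelet directly on the host; ``docker`` remains accepted but is
# unlikely to work on newer Kubernetes releases.
kubelet_deployment_type: host
```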

View file

@ -7,17 +7,9 @@ dependencies:
       - container-engine
       - crio
-  - role: container-engine/rkt
-    when:
-      - container_manager == 'rkt'
-    tags:
-      - container-engine
-      - rkt
   - role: container-engine/docker
     when:
-      - container_manager == 'docker' or container_manager == "rkt"
+      - container_manager == 'docker'
     tags:
       - container-engine
       - docker
-      - rkt
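
With the rkt option gone, the container-engine dependency above is selected solely by `container_manager`; a minimal group_vars sketch (values illustrative, not part of this commit):

```yaml
# inventory/sample/group_vars/k8s-cluster.yml
container_manager: docker   # or 'crio' (experimental, CentOS-based OSes only)
```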

View file

@ -1,6 +0,0 @@
---
rkt_version: 1.21.0
rkt_pkg_version: "{{ rkt_version }}-1"
rkt_download_src: https://github.com/coreos/rkt
rkt_download_url: "{{ rkt_download_src }}/releases/download/v{{ rkt_version }}"

View file

@ -1,2 +0,0 @@
#!/bin/bash
rkt gc

View file

@ -1,54 +0,0 @@
---
- name: gather os specific variables for rkt
include_vars: "{{ item }}"
with_first_found:
- files:
- "{{ ansible_distribution|lower }}-{{ ansible_distribution_version|lower|replace('/', '_') }}.yml"
- "{{ ansible_distribution|lower }}-{{ ansible_distribution_release }}.yml"
- "{{ ansible_distribution|lower }}-{{ ansible_distribution_major_version|lower|replace('/', '_') }}.yml"
- "{{ ansible_distribution|lower }}.yml"
- "{{ ansible_os_family|lower }}.yml"
- defaults.yml
paths:
- ../vars
skip: true
tags:
- facts
- name: install rkt pkg on ubuntu
apt:
deb: "{{ rkt_download_url }}/{{ rkt_pkg_name }}"
state: present
register: rkt_task_result
until: rkt_task_result is succeeded
retries: 4
delay: "{{ retry_stagger | random + 3 }}"
when: ansible_os_family == "Debian"
- name: install rkt pkg on fedora
dnf:
name: rkt
state: present
when: ansible_distribution == "Fedora"
- name: install rkt pkg on centos
yum:
pkg: "{{ rkt_download_url }}/{{ rkt_pkg_name }}"
state: present
register: rkt_task_result
until: rkt_task_result is succeeded
retries: 4
delay: "{{ retry_stagger | random + 3 }}"
when:
- ansible_os_family == "RedHat"
- ansible_distribution != "Fedora"
- name: install rkt pkg on openSUSE
zypper:
name: "{{ rkt_download_url }}/{{ rkt_pkg_name }}"
state: present
register: rkt_task_result
until: rkt_task_result is succeeded
retries: 4
delay: "{{ retry_stagger | random + 3 }}"
when: ansible_os_family == "Suse"

View file

@ -1,13 +0,0 @@
---
- name: Install rkt
import_tasks: install.yml
when: not ansible_os_family in ["CoreOS", "Container Linux by CoreOS"]
- name: Set up cron job to do garbage cleanup
copy:
src: rkt-gc.sh
dest: /etc/cron.hourly/rkt-gc.sh
owner: root
group: root
mode: 0750
when: not ansible_os_family in ["CoreOS", "Container Linux by CoreOS"]

View file

@ -1,2 +0,0 @@
---
rkt_pkg_name: "rkt_{{ rkt_pkg_version }}_amd64.deb"

View file

@ -1,2 +0,0 @@
---
rkt_pkg_name: "rkt-{{ rkt_pkg_version }}.x86_64.rpm"

View file

@ -1,2 +0,0 @@
---
rkt_pkg_name: "rkt-{{ rkt_pkg_version }}.x86_64.rpm"

View file

@ -1,2 +0,0 @@
---
rkt_pkg_name: "rkt-{{ rkt_pkg_version }}.x86_64.rpm"

View file

@ -8,8 +8,6 @@ kind: InitConfiguration
 nodeRegistration:
 {% if container_manager == 'crio' %}
   criSocket: /var/run/crio/crio.sock
-{% elif container_manager == 'rkt' %}
-  criSocket: /var/run/rkt.sock
 {% else %}
   criSocket: /var/run/dockershim.sock
 {% endif %}
@ -41,8 +39,6 @@ etcd:
 nodeRegistration:
 {% if container_manager == 'crio' %}
   criSocket: /var/run/crio/crio.sock
-{% elif container_manager == 'rkt' %}
-  criSocket: /var/run/rkt.sock
 {% else %}
   criSocket: /var/run/dockershim.sock
 {% endif %}

View file

@ -1,30 +0,0 @@
---
- name: Trust etcd container
command: >-
/usr/bin/rkt trust
--skip-fingerprint-review
--root
https://quay.io/aci-signing-key
register: etcd_rkt_trust_result
until: etcd_rkt_trust_result.rc == 0
retries: 4
delay: "{{ retry_stagger | random + 3 }}"
changed_when: false
environment: "{{proxy_env}}"
when: etcd_cluster_setup
- name: Install | Copy etcdctl binary from rkt container
command: >-
/usr/bin/rkt run
--volume=bin-dir,kind=host,source={{ bin_dir}},readOnly=false
--mount=volume=bin-dir,target=/host/bin
{{ etcd_image_repo }}:{{ etcd_image_tag }}
--name=etcdctl-binarycopy
--exec=/bin/cp -- /usr/local/bin/etcdctl /host/bin/etcdctl
register: etcd_task_result
until: etcd_task_result.rc == 0
retries: 4
delay: "{{ retry_stagger | random + 3 }}"
changed_when: false
environment: "{{proxy_env}}"
when: etcd_cluster_setup

View file

@ -1,31 +0,0 @@
[Unit]
Description=etcd events rkt wrapper
Documentation=https://github.com/coreos/etcd
Wants=network.target
[Service]
Restart=on-failure
RestartSec=10s
TimeoutStartSec=0
LimitNOFILE=40000
ExecStart=/usr/bin/rkt run \
--uuid-file-save=/var/run/etcd-events.uuid \
--volume hosts,kind=host,source=/etc/hosts,readOnly=true \
--mount volume=hosts,target=/etc/hosts \
--volume=etc-ssl-certs,kind=host,source=/etc/ssl/certs,readOnly=true \
--mount=volume=etc-ssl-certs,target=/etc/ssl/certs \
--volume=etcd-cert-dir,kind=host,source={{ etcd_cert_dir }},readOnly=true \
--mount=volume=etcd-cert-dir,target={{ etcd_cert_dir }} \
--volume=etcd-data-dir,kind=host,source={{ etcd_events_data_dir }},readOnly=false \
--mount=volume=etcd-data-dir,target={{ etcd_events_data_dir }} \
--set-env-file=/etc/etcd-events.env \
--stage1-from-dir=stage1-fly.aci \
{{ etcd_image_repo }}:{{ etcd_image_tag }} \
--name={{ etcd_member_name | default("etcd-events") }}
ExecStartPre=-/usr/bin/rkt rm --uuid-file=/var/run/etcd-events.uuid
ExecStop=-/usr/bin/rkt stop --uuid-file=/var/run/etcd-events.uuid
[Install]
WantedBy=multi-user.target

View file

@ -1,31 +0,0 @@
[Unit]
Description=etcd rkt wrapper
Documentation=https://github.com/coreos/etcd
Wants=network.target
[Service]
Restart=on-failure
RestartSec=10s
TimeoutStartSec=0
LimitNOFILE=40000
ExecStart=/usr/bin/rkt run \
--uuid-file-save=/var/run/etcd.uuid \
--volume hosts,kind=host,source=/etc/hosts,readOnly=true \
--mount volume=hosts,target=/etc/hosts \
--volume=etc-ssl-certs,kind=host,source=/etc/ssl/certs,readOnly=true \
--mount=volume=etc-ssl-certs,target=/etc/ssl/certs \
--volume=etcd-cert-dir,kind=host,source={{ etcd_cert_dir }},readOnly=true \
--mount=volume=etcd-cert-dir,target={{ etcd_cert_dir }} \
--volume=etcd-data-dir,kind=host,source={{ etcd_data_dir }},readOnly=false \
--mount=volume=etcd-data-dir,target={{ etcd_data_dir }} \
--set-env-file=/etc/etcd.env \
--stage1-from-dir=stage1-fly.aci \
{{ etcd_image_repo }}:{{ etcd_image_tag }} \
--name={{ etcd_member_name | default("etcd") }}
ExecStartPre=-/usr/bin/rkt rm --uuid-file=/var/run/etcd.uuid
ExecStop=-/usr/bin/rkt stop --uuid-file=/var/run/etcd.uuid
[Install]
WantedBy=multi-user.target

View file

@ -18,8 +18,6 @@ nodeRegistration:
   name: {{ kube_override_hostname }}
 {% if container_manager == 'crio' %}
   criSocket: /var/run/crio/crio.sock
-{% elif container_manager == 'rkt' %}
-  criSocket: /var/run/rkt.sock
 {% else %}
   criSocket: /var/run/dockershim.sock
 {% endif %}

View file

@ -18,8 +18,6 @@ nodeRegistration:
   name: {{ kube_override_hostname }}
 {% if container_manager == 'crio' %}
   criSocket: /var/run/crio/crio.sock
-{% elif container_manager == 'rkt' %}
-  criSocket: /var/run/rkt.sock
 {% else %}
   criSocket: /var/run/dockershim.sock
 {% endif %}

View file

@ -16,8 +16,6 @@ nodeRegistration:
   name: {{ kube_override_hostname }}
 {% if container_manager == 'crio' %}
   criSocket: /var/run/crio/crio.sock
-{% elif container_manager == 'rkt' %}
-  criSocket: /var/run/rkt.sock
 {% else %}
   criSocket: /var/run/dockershim.sock
 {% endif %}

View file

@ -230,8 +230,6 @@ nodeRegistration:
 {% endif %}
 {% if container_manager == 'crio' %}
   criSocket: /var/run/crio/crio.sock
-{% elif container_manager == 'rkt' %}
-  criSocket: /var/run/rkt.sock
 {% else %}
   criSocket: /var/run/dockershim.sock
 {% endif %}

View file

@ -16,8 +16,6 @@ nodeRegistration:
 {% endif %}
 {% if container_manager == 'crio' %}
   criSocket: /var/run/crio/crio.sock
-{% elif container_manager == 'rkt' %}
-  criSocket: /var/run/rkt.sock
 {% else %}
   criSocket: /var/run/dockershim.sock
 {% endif %}

View file

@ -16,8 +16,6 @@ nodeRegistration:
 {% endif %}
 {% if container_manager == 'crio' %}
   criSocket: /var/run/crio/crio.sock
-{% elif container_manager == 'rkt' %}
-  criSocket: /var/run/rkt.sock
 {% else %}
   criSocket: /var/run/dockershim.sock
 {% endif %}

View file

@ -19,8 +19,6 @@ nodeRegistration:
   name: {{ inventory_hostname }}
 {% if container_manager == 'crio' %}
   criSocket: /var/run/crio/crio.sock
-{% elif container_manager == 'rkt' %}
-  criSocket: /var/run/rkt.sock
 {% else %}
   criSocket: /var/run/dockershim.sock
 {% endif %}

View file

@ -215,7 +215,6 @@
- "{{ bin_dir }}/calicoctl" - "{{ bin_dir }}/calicoctl"
- "{{ bin_dir }}/calico-upgrade" - "{{ bin_dir }}/calico-upgrade"
- "{{ bin_dir }}/weave" - "{{ bin_dir }}/weave"
- /var/lib/rkt
- /var/lib/cni - /var/lib/cni
- /etc/vault - /etc/vault
- /etc/contiv - /etc/contiv

View file

@ -1,15 +0,0 @@
---
# Instance settings
cloud_image_family: ubuntu-1604-lts
cloud_region: us-central1-c
mode: separate
# Deployment settings
kube_network_plugin: flannel
etcd_deployment: rkt
kubelet_deployment: rkt
download_localhost: true
download_run_once: true
deploy_netchecker: true
dns_min_replicas: 1
cloud_provider: gce

View file

@ -1,13 +0,0 @@
---
# Instance settings
cloud_image: ubuntu-1604
mode: separate
# Kubespray settings
kube_network_plugin: flannel
etcd_deployment: rkt
kubelet_deployment: rkt
download_localhost: true
download_run_once: true
deploy_netchecker: true
dns_min_replicas: 1