Saturday, June 03, 2017

Kubeadm and Ubuntu

First, set up the packages


sudo su -

apt-get update && apt-get install -y apt-transport-https
curl -s https://packages.cloud.google.com/apt/doc/apt-key.gpg | apt-key add -
cat <<EOF > /etc/apt/sources.list.d/kubernetes.list
deb http://apt.kubernetes.io/ kubernetes-xenial main
EOF
apt-get update
apt-get upgrade

This script installs the recommended version of Docker on Ubuntu Server:

# Install Docker on Xenial 16.04.2 server x64

# Ref https://docs.docker.com/engine/installation/linux/ubuntulinux/
# No interactive for now.
export DEBIAN_FRONTEND=noninteractive
# Update your APT package index.
sudo apt-get -y update
# Update package information, ensure that APT works with the https method, and that CA certificates are installed.
sudo apt-get -y install apt-transport-https ca-certificates
# Add the new GPG key.
sudo apt-key adv --keyserver hkp://p80.pool.sks-keyservers.net:80 --recv-keys 58118E89F3A912897C070ADBF76221572C52609D
# Add docker.list
sudo echo "deb https://apt.dockerproject.org/repo ubuntu-xenial main" > /etc/apt/sources.list.d/docker.list
# Update your APT package index.
sudo apt-get -y update
# Purge the old package if it exists.
sudo apt-get -y purge lxc-docker
# Verify that APT is pulling from the right repository.
sudo apt-cache policy docker-engine
# Install the recommended package.
sudo apt-get -y install linux-image-extra-$(uname -r)
# Install Docker.
sudo apt-get -y install docker-engine=1.12.6-0~ubuntu-xenial
# Start the docker daemon.
sudo service docker start
# Validate the Docker version.
docker -v
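
To validate end to end, you can also run the standard hello-world container (it pulls a tiny test image from Docker Hub):

sudo docker run --rm hello-world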

NOTE: you may need to run this if you installed Ubuntu from CD (it removes the cdrom entry from apt's sources):

sudo sed -i '/cdrom/d' /etc/apt/sources.list


Installing the correct version of kubectl

 curl -LO https://storage.googleapis.com/kubernetes-release/release/v1.6.2/bin/linux/amd64/kubectl
Make the kubectl binary executable.
 chmod +x ./kubectl
Move the binary into your PATH.
 sudo mv ./kubectl /usr/local/bin/kubectl



Validate with kubectl version; the connection-refused message below is expected since no cluster is running yet:

kubectl version
Client Version: version.Info{Major:"1", Minor:"6", GitVersion:"v1.6.2", GitCommit:"477efc3cbe6a7effca06bd1452fa356e2201e1ee", GitTreeState:"clean", BuildDate:"2017-04-19T20:33:11Z", GoVersion:"go1.7.5", Compiler:"gc", Platform:"linux/amd64"}
The connection to the server localhost:8080 was refused - did you specify the right host or port?

sudo apt-get -y update

apt-cache madison kubelet


root@ubuntu1:~# apt-cache madison kubelet
   kubelet |   1.6.4-00 | http://apt.kubernetes.io kubernetes-xenial/main amd64 Packages
   kubelet |   1.6.3-00 | http://apt.kubernetes.io kubernetes-xenial/main amd64 Packages
   kubelet |   1.6.2-00 | http://apt.kubernetes.io kubernetes-xenial/main amd64 Packages
   kubelet |   1.6.1-00 | http://apt.kubernetes.io kubernetes-xenial/main amd64 Packages
   kubelet |   1.6.0-00 | http://apt.kubernetes.io kubernetes-xenial/main amd64 Packages
   kubelet |   1.5.7-00 | http://apt.kubernetes.io kubernetes-xenial/main amd64 Packages
   kubelet |   1.5.6-00 | http://apt.kubernetes.io kubernetes-xenial/main amd64 Packages
   kubelet |   1.5.3-00 | http://apt.kubernetes.io kubernetes-xenial/main amd64 Packages
   kubelet |   1.5.2-00 | http://apt.kubernetes.io kubernetes-xenial/main amd64 Packages
   kubelet |   1.5.1-00 | http://apt.kubernetes.io kubernetes-xenial/main amd64 Packages

Install the pinned version:

apt-get install -y kubelet=1.6.2-00


apt-cache madison kubernetes-cni
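
kubernetes-cni is normally pulled in as a dependency of kubelet, but it can be pinned the same way; the 0.5.1-00 below is an assumption, so substitute a version from your own madison output:

apt-get install -y kubernetes-cni=0.5.1-00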


apt-cache madison kubeadm

root@ubuntu1:~# apt-cache madison kubeadm
   kubeadm |   1.6.4-00 | http://apt.kubernetes.io kubernetes-xenial/main amd64 Packages
   kubeadm |   1.6.3-00 | http://apt.kubernetes.io kubernetes-xenial/main amd64 Packages
   kubeadm |   1.6.2-00 | http://apt.kubernetes.io kubernetes-xenial/main amd64 Packages
   kubeadm |   1.6.1-00 | http://apt.kubernetes.io kubernetes-xenial/main amd64 Packages
   kubeadm |   1.5.7-00 | http://apt.kubernetes.io kubernetes-xenial/main amd64 Packages


root@ubuntu1:~# apt-get install -y kubeadm=1.6.2-00 --allow-downgrades
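
Optionally, hold the pinned packages so a later apt-get upgrade does not move them forward (kubectl was installed from the release binary above, so only the apt packages need holding):

apt-mark hold kubelet kubeadm kubernetes-cni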



root@ubuntu1:~# kubeadm version
kubeadm version: version.Info{Major:"1", Minor:"6", GitVersion:"v1.6.2", GitCommit:"477efc3cbe6a7effca06bd1452fa356e2201e1ee", GitTreeState:"clean", BuildDate:"2017-04-19T20:22:08Z", GoVersion:"go1.7.5", Compiler:"gc", Platform:"linux/amd64"}


root@ubuntu1:~# kubeadm reset
[preflight] Running pre-flight checks
[reset] Stopping the kubelet service
[reset] Unmounting mounted directories in "/var/lib/kubelet"
[reset] Removing kubernetes-managed containers
[reset] Deleting contents of stateful directories: [/var/lib/kubelet /etc/cni/net.d /var/lib/dockershim /var/lib/etcd]
[reset] Deleting contents of config directories: [/etc/kubernetes/manifests /etc/kubernetes/pki]
[reset] Deleting files: [/etc/kubernetes/admin.conf /etc/kubernetes/kubelet.conf /etc/kubernetes/controller-manager.conf /etc/kubernetes/scheduler.conf]


root@ubuntu1:~# kubeadm init --kubernetes-version=v1.6.2
[kubeadm] WARNING: kubeadm is in beta, please do not use it for production clusters.
[init] Using Kubernetes version: v1.6.2
[init] Using Authorization mode: RBAC
[preflight] Running pre-flight checks
[preflight] Starting the kubelet service
[certificates] Generated CA certificate and key.
[certificates] Generated API server certificate and key.
[certificates] API Server serving cert is signed for DNS names [ubuntu1 kubernetes kubernetes.default kubernetes.default.svc kubernetes.default.svc.cluster.local] and IPs [10.96.0.1 192.168.1.140]
[certificates] Generated API server kubelet client certificate and key.
[certificates] Generated service account token signing key and public key.
[certificates] Generated front-proxy CA certificate and key.
[certificates] Generated front-proxy client certificate and key.
[certificates] Valid certificates and keys now exist in "/etc/kubernetes/pki"
[kubeconfig] Wrote KubeConfig file to disk: "/etc/kubernetes/scheduler.conf"
[kubeconfig] Wrote KubeConfig file to disk: "/etc/kubernetes/admin.conf"
[kubeconfig] Wrote KubeConfig file to disk: "/etc/kubernetes/kubelet.conf"
[kubeconfig] Wrote KubeConfig file to disk: "/etc/kubernetes/controller-manager.conf"
[apiclient] Created API client, waiting for the control plane to become ready
[apiclient] All control plane components are healthy after 32.574628 seconds
[apiclient] Waiting for at least one node to register
[apiclient] First node has registered after 4.503987 seconds
[token] Using token: e4f356.256c0aeddd4bf937
[apiconfig] Created RBAC rules
[addons] Created essential addon: kube-proxy
[addons] Created essential addon: kube-dns

Your Kubernetes master has initialized successfully!
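
The tail of the init output also prints the kubeadm join command with the token from above; save it, as you will need it on each worker node. It looks roughly like this (token elided):

kubeadm join --token <token> 192.168.1.140:6443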


This is on the main network. If the machine is dual-homed, you can configure the
other NIC and repeat:

https://www.swiftstack.com/docs/install/configure_networking.html



root@ubuntu1:~# vi /etc/network/interfaces
# This file describes the network interfaces available on your system
# and how to activate them. For more information, see interfaces(5).

source /etc/network/interfaces.d/*

# The loopback network interface
auto lo
iface lo inet loopback

# The primary network interface
auto eno4
iface eno4 inet dhcp

iface eno3 inet static
    address 10.0.0.8
    netmask 255.255.255.0
    network 10.0.0.0
    broadcast 10.0.0.255
    # no gateway here; the default route already comes from eno4 via DHCP


 sudo ifup eno3


root@ubuntu1:~# ip addr
1: lo: <LOOPBACK,UP,LOWER_UP> mtu 65536 qdisc noqueue state UNKNOWN group default qlen 1
    link/loopback 00:00:00:00:00:00 brd 00:00:00:00:00:00
    inet 127.0.0.1/8 scope host lo
       valid_lft forever preferred_lft forever
    inet6 ::1/128 scope host
       valid_lft forever preferred_lft forever
2: eno1: <BROADCAST,MULTICAST> mtu 1500 qdisc noop state DOWN group default qlen 1000
    link/ether 00:21:9b:a0:51:be brd ff:ff:ff:ff:ff:ff
3: eno2: <BROADCAST,MULTICAST> mtu 1500 qdisc noop state DOWN group default qlen 1000
    link/ether 00:21:9b:a0:51:c0 brd ff:ff:ff:ff:ff:ff
4: eno3: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc mq state UP group default qlen 1000
    link/ether 00:21:9b:a0:51:c2 brd ff:ff:ff:ff:ff:ff
    inet 10.0.0.8/24 brd 10.0.0.255 scope global eno3
       valid_lft forever preferred_lft forever
    inet6 fe80::221:9bff:fea0:51c2/64 scope link
       valid_lft forever preferred_lft forever
5: eno4: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc mq state UP group default qlen 1000
    link/ether 00:21:9b:a0:51:c4 brd ff:ff:ff:ff:ff:ff
    inet 192.168.1.140/24 brd 192.168.1.255 scope global eno4
       valid_lft forever preferred_lft forever
    inet6 fe80::221:9bff:fea0:51c4/64 scope link
       valid_lft forever preferred_lft forever
6: docker0: <NO-CARRIER,BROADCAST,MULTICAST,UP> mtu 1500 qdisc noqueue state DOWN group default
    link/ether 02:42:c1:75:ae:99 brd ff:ff:ff:ff:ff:ff
    inet 172.17.0.1/16 scope global docker0
       valid_lft forever preferred_lft forever


kubeadm reset
kubeadm init --kubernetes-version=v1.6.2 --apiserver-advertise-address=10.0.0.8

The advertise address appears to foul the Calico install in a later step.

TODO: configure on its own network.


root@ubuntu1:~# sudo cp /etc/kubernetes/admin.conf $HOME/
root@ubuntu1:~#  sudo chown $(id -u):$(id -g) $HOME/admin.conf
root@ubuntu1:~# export KUBECONFIG=$HOME/admin.conf
root@ubuntu1:~# kubectl
kubectl controls the Kubernetes cluster manager.

Find more information at https://github.com/kubernetes/kubernetes.

Basic Commands (Beginner):
  create         Create a resource by filename or stdin
  expose         Take a replication controller, service, deployment or pod and expose it as a new Kubernetes Service
  run            Run a particular image on the cluster
  set            Set specific features on objects
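
The export only lasts for the current shell; to make it persistent, append it to root's .bashrc, then run a quick sanity check against the cluster:

echo 'export KUBECONFIG=$HOME/admin.conf' >> ~/.bashrc
kubectl get componentstatuses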


Next, set up the 2nd machine as above, but do not run the init step.

First configure its new 10.0.0.x address, then join it to the cluster:

root@ubuntu2:~#  kubeadm join --token xxxxxxxxxxxxxxx 10.0.0.8:6443
[kubeadm] WARNING: kubeadm is in beta, please do not use it for production clusters.
[preflight] Running pre-flight checks
[discovery] Trying to connect to API Server "10.0.0.8:6443"
[discovery] Created cluster-info discovery client, requesting info from "https://10.0.0.8:6443"
[discovery] Cluster info signature and contents are valid, will use API Server "https://10.0.0.8:6443"
[discovery] Successfully established connection with API Server "10.0.0.8:6443"
[bootstrap] Detected server version: v1.6.2
[bootstrap] The server supports the Certificates API (certificates.k8s.io/v1beta1)
[csr] Created API client to obtain unique certificate for this node, generating keys and certificate signing request
[csr] Received signed certificate from the API server, generating KubeConfig...
[kubeconfig] Wrote KubeConfig file to disk: "/etc/kubernetes/kubelet.conf"

Node join complete:
* Certificate signing request sent to master and response
  received.
* Kubelet informed of new secure connection details.

Run 'kubectl get nodes' on the master to see this machine join.

root@ubuntu1:~# kubectl get nodes
NAME      STATUS     AGE       VERSION
ubuntu1   NotReady   1m        v1.6.2
ubuntu2   NotReady   2s        v1.6.2


Repeat until you have 5 nodes:

root@ubuntu1:~# kubectl get nodes
NAME      STATUS     AGE       VERSION
ubuntu1   NotReady   1h        v1.6.2
ubuntu2   NotReady   1h        v1.6.2
ubuntu3   NotReady   1h        v1.6.2
ubuntu5   NotReady   45m       v1.6.2
ubuntu6   NotReady   7s        v1.6.2
root@ubuntu1:~#
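
The nodes stay NotReady until a pod network add-on is installed. Calico was used here. The hosted manifest below is the path the kubeadm 1.6 walkthroughs pointed at in mid-2017; verify it against the current Calico docs before relying on it:

kubectl apply -f http://docs.projectcalico.org/v2.1/getting-started/kubernetes/installation/hosted/kubeadm/1.6/calico.yaml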


-------------
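calicoctl here is the standalone binary downloaded onto each node. The v1.1.3 release below is an assumption; pick the release that matches your installed Calico version:

curl -O -L https://github.com/projectcalico/calicoctl/releases/download/v1.1.3/calicoctl
chmod +x calicoctl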
root@ubuntu2:~# ./calicoctl node status
Calico process is running.

IPv4 BGP status
+---------------+-------------------+-------+----------+-------------+
| PEER ADDRESS  |     PEER TYPE     | STATE |  SINCE   |    INFO     |
+---------------+-------------------+-------+----------+-------------+
| 192.168.1.145 | node-to-node mesh | up    | 02:35:46 | Established |
| 192.168.1.122 | node-to-node mesh | up    | 02:35:46 | Established |
+---------------+-------------------+-------+----------+-------------+

UPDATE:

I kept getting issues with the kube-dns server. I was able to fix them by
changing the CIDR used when initializing kubeadm (after another kubeadm reset), like this:

kubeadm init --kubernetes-version=v1.6.2 --pod-network-cidr=10.96.0.0/24

and then downloading the calico.yaml file and editing it to move the IP addresses
it uses into that subnet:

data:
  # The location of your etcd cluster.  This uses the Service clusterIP
  # defined below.
  etcd_endpoints: "http://10.96.0.136:6666"

and further down:

spec:
  # Select the calico-etcd pod running on the master.
  selector:
    k8s-app: calico-etcd
  # This ClusterIP needs to be known in advance, since we cannot rely
  # on DNS to get access to etcd.
  clusterIP: 10.96.0.136
  ports:
    - port: 6666
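
With those edits in place, apply the modified manifest from the local copy:

kubectl apply -f calico.yaml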


Now kube-dns comes up:

root@ubuntu1:~# kubectl get pods -o wide --all-namespaces
NAMESPACE     NAME                                        READY     STATUS    RESTARTS   AGE       IP               NODE
kube-system   calico-etcd-879dg                           1/1       Running   0          12m       192.168.1.140    ubuntu1
kube-system   calico-node-92vkh                           2/2       Running   0          12m       192.168.1.140    ubuntu1
kube-system   calico-policy-controller-1777954159-sxl73   1/1       Running   0          12m       192.168.1.140    ubuntu1
kube-system   etcd-ubuntu1                                1/1       Running   0          13m       192.168.1.140    ubuntu1
kube-system   kube-apiserver-ubuntu1                      1/1       Running   0          12m       192.168.1.140    ubuntu1
kube-system   kube-controller-manager-ubuntu1             1/1       Running   0          13m       192.168.1.140    ubuntu1
kube-system   kube-dns-3913472980-ckllx                   3/3       Running   0          14m       192.168.25.129   ubuntu1
kube-system   kube-proxy-ghkl2                            1/1       Running   0          14m       192.168.1.140    ubuntu1
kube-system   kube-scheduler-ubuntu1                      1/1       Running   0          12m       192.168.1.140    ubuntu1
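
A quick check that DNS actually resolves from inside a pod (assumes the busybox image can be pulled from Docker Hub):

kubectl run -it --rm dnstest --image=busybox --restart=Never -- nslookup kubernetes.default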

I had to fix Tiller by adding a service account:

kubectl create serviceaccount --namespace kube-system tiller
kubectl create clusterrolebinding tiller-cluster-rule --clusterrole=cluster-admin --serviceaccount=kube-system:tiller
kubectl patch deploy --namespace kube-system tiller-deploy -p '{"spec":{"template":{"spec":{"serviceAccount":"tiller"}}}}'
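
If your Helm release supports it, helm init --service-account tiller achieves the same on a fresh install. Either way, verify that Tiller comes back and that both client and server report a version:

kubectl get pods --namespace kube-system | grep tiller
helm version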

Now I can follow the docs down to the Ceph install, but I am seeing some errors in it now:

root@ubuntu1:~/openstack-helm# helm list
NAME                    REVISION        UPDATED                         STATUS          CHART           NAMESPACE
bootstrap-ceph          1               Sun Jun 18 00:12:37 2017        DEPLOYED        bootstrap-0.1.0 ceph
bootstrap-openstack     1               Sun Jun 18 00:13:03 2017        DEPLOYED        bootstrap-0.1.0 openstack
ceph                    1               Sun Jun 18 00:12:09 2017        DEPLOYED        ceph-0.1.0      ceph
root@ubuntu1:~/openstack-helm#


