diff --git a/04 Replication and other Controllers/kubia-rc.yaml b/04 Replication and other Controllers/kubia-rc.yaml
index 7ef28e2..0d42c7c 100644
--- a/04 Replication and other Controllers/kubia-rc.yaml
+++ b/04 Replication and other Controllers/kubia-rc.yaml
@@ -3,7 +3,7 @@ kind: ReplicationController
 metadata:
   name: kubia
 spec:
-  replicas: 3
+  replicas: 4
   selector:
     app: kubia
   template:
diff --git a/04 Replication and other Controllers/kubia-replicaset-matchexpressions.yaml b/04 Replication and other Controllers/kubia-replicaset-matchexpressions.yaml
index 6e8c072..7c9ac3e 100644
--- a/04 Replication and other Controllers/kubia-replicaset-matchexpressions.yaml
+++ b/04 Replication and other Controllers/kubia-replicaset-matchexpressions.yaml
@@ -10,10 +10,15 @@ spec:
         operator: In
         values:
           - kubia
+      - key: app1
+        operator: In
+        values:
+          - kubia1
   template:
     metadata:
       labels:
         app: kubia
+        app1: kubia1
     spec:
       containers:
       - name: kubia
diff --git a/04 Replication and other Controllers/multi-completion-batch-job.yaml b/04 Replication and other Controllers/multi-completion-batch-job.yaml
index bdb1ea0..40d55ca 100644
--- a/04 Replication and other Controllers/multi-completion-batch-job.yaml
+++ b/04 Replication and other Controllers/multi-completion-batch-job.yaml
@@ -3,7 +3,7 @@ kind: Job
 metadata:
   name: multi-completion-batch-job
 spec:
-  completions: 5
+  completions: 5  # run this Job's pod 5 times, one after the other
   template:
     metadata:
       labels:
diff --git a/04 Replication and other Controllers/multi-completion-parallel-batch-job.yaml b/04 Replication and other Controllers/multi-completion-parallel-batch-job.yaml
index a0cef8d..4b69a1f 100644
--- a/04 Replication and other Controllers/multi-completion-parallel-batch-job.yaml
+++ b/04 Replication and other Controllers/multi-completion-parallel-batch-job.yaml
@@ -4,7 +4,7 @@ metadata:
   name: multi-completion-batch-job
 spec:
   completions: 5
-  parallelism: 2
+  parallelism: 2  # run 2 pods in parallel, then 2 more, then the final 1
   template:
     metadata:
       labels:
diff --git a/05 Services/external-service-endpoints.yaml b/05 Services/external-service-endpoints.yaml
index e48e53c..27d7d05 100644
--- a/05 Services/external-service-endpoints.yaml
+++ b/05 Services/external-service-endpoints.yaml
@@ -4,7 +4,7 @@ metadata:
   name: external-service
 subsets:
   - addresses:
-    - ip: 11.11.11.11
-    - ip: 22.22.22.22
+    - ip: 18.140.68.238
+    - ip: 54.169.220.93
     ports:
     - port: 80
diff --git a/05 Services/kubia-ingress.yaml b/05 Services/kubia-ingress.yaml
index 6386292..cf1ced7 100644
--- a/05 Services/kubia-ingress.yaml
+++ b/05 Services/kubia-ingress.yaml
@@ -1,5 +1,5 @@
 apiVersion: extensions/v1beta1
-kind: Ingress
+kind: Ingress  # this configures rules on the Ingress controller; the Ingress controller itself must be created first, in a separate YAML file
 metadata:
   name: kubia
 spec:
@@ -9,5 +9,5 @@ spec:
       paths:
       - path: /
         backend:
-          serviceName: kubia-nodeport
+          serviceName: kubia-nodeport  # a NodePort Service is not required here; a ClusterIP Service works as well
           servicePort: 80
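The comment added to kubia-ingress.yaml above notes that a ClusterIP Service would back the Ingress just as well as the NodePort one. A minimal sketch of such a Service, assuming the app: kubia pod label used elsewhere in this repo (the name kubia-clusterip and the container port 8080 are assumptions, not existing files):

    apiVersion: v1
    kind: Service
    metadata:
      name: kubia-clusterip      # assumed name; any ClusterIP Service can back an Ingress
    spec:
      type: ClusterIP            # the default Service type; no node port is opened
      selector:
        app: kubia               # assumed to match the kubia pod labels
      ports:
      - port: 80                 # the port the Ingress refers to as servicePort: 80
        targetPort: 8080         # assumed container port of the kubia image

The Ingress backend above would then use serviceName: kubia-clusterip instead of kubia-nodeport.
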
diff --git a/06 Volumes/fortune-pod.yaml b/06 Volumes/fortune-pod.yaml
index ecd4141..7788c0d 100644
--- a/06 Volumes/fortune-pod.yaml
+++ b/06 Volumes/fortune-pod.yaml
@@ -7,12 +7,12 @@ spec:
   - image: luksa/fortune
     name: html-generator
     volumeMounts:
-    - name: html
+    - name: html  # anything written to /var/htdocs ends up in the html volume
       mountPath: /var/htdocs
   - image: nginx:alpine
     name: web-server
     volumeMounts:
-    - name: html
+    - name: html  # /usr/share/nginx/html is mapped to the html volume, so nginx serves whatever the fortune app writes
       mountPath: /usr/share/nginx/html
       readOnly: true
     ports:
@@ -20,5 +20,5 @@ spec:
       protocol: TCP
   volumes:
   - name: html
-    emptyDir: {}
+    emptyDir: {}  # volume type: emptyDir starts out empty; other options include gitRepo (clones a repo into the volume) and hostPath (uses a directory on the host node)
diff --git a/06 Volumes/mongodb-pod-gcepd.yaml b/06 Volumes/mongodb-pod-gcepd.yaml
index b2bdfd6..3c41c69 100644
--- a/06 Volumes/mongodb-pod-gcepd.yaml
+++ b/06 Volumes/mongodb-pod-gcepd.yaml
@@ -5,7 +5,7 @@ metadata:
 spec:
   volumes:
   - name: mongodb-data
-    gcePersistentDisk:
+    gcePersistentDisk:  # a GCE Persistent Disk (Google Cloud storage)
       pdName: mongodb
       fsType: nfs4
   containers:
diff --git a/06 Volumes/mongodb-pvc.yaml b/06 Volumes/mongodb-pvc.yaml
index 737107d..43b4dd2 100644
--- a/06 Volumes/mongodb-pvc.yaml
+++ b/06 Volumes/mongodb-pvc.yaml
@@ -5,7 +5,7 @@ metadata:
 spec:
   resources:
     requests:
-      storage: 1Gi
+      storage: 500Mi
   accessModes:
   - ReadWriteOnce
   storageClassName: ""
diff --git a/07 ConfigMaps and Secrets/fortune-pod-args-configmap.yaml b/07 ConfigMaps and Secrets/fortune-pod-args-configmap.yaml
index 208899d..07bc0e2 100644
--- a/07 ConfigMaps and Secrets/fortune-pod-args-configmap.yaml
+++ b/07 ConfigMaps and Secrets/fortune-pod-args-configmap.yaml
@@ -6,11 +6,11 @@ spec:
   containers:
   - image: luksa/fortune:args
     env:
-    - name: INTERVAL
+    - name: INTERVAL  # an environment variable
       valueFrom:
         configMapKeyRef:
-          name: fortune-config
-          key: sleep-interval
+          name: fortune-config  # name of the ConfigMap
+          key: sleep-interval   # key within that ConfigMap
     args: ["$(INTERVAL)"]
     name: html-generator
     volumeMounts:
diff --git a/07 ConfigMaps and Secrets/fortune-pod-args.yaml b/07 ConfigMaps and Secrets/fortune-pod-args.yaml
index 5382590..0ec887c 100644
--- a/07 ConfigMaps and Secrets/fortune-pod-args.yaml
+++ b/07 ConfigMaps and Secrets/fortune-pod-args.yaml
@@ -5,7 +5,7 @@ metadata:
 spec:
   containers:
   - image: luksa/fortune:args
-    args: ["2"]
+    args: ["2"]  # picked up as $INTERVAL in fortune-args/fortuneloop.sh
     name: html-generator
     volumeMounts:
     - name: html
diff --git a/07 ConfigMaps and Secrets/fortune-pod-configmap-volume-with-items.yaml b/07 ConfigMaps and Secrets/fortune-pod-configmap-volume-with-items.yaml
index 231350a..12fe8f4 100644
--- a/07 ConfigMaps and Secrets/fortune-pod-configmap-volume-with-items.yaml
+++ b/07 ConfigMaps and Secrets/fortune-pod-configmap-volume-with-items.yaml
@@ -26,8 +26,8 @@ spec:
     emptyDir: {}
   - name: config
     configMap:
-      name: fortune-config
+      name: fortune-config  # read entries from this ConfigMap
       items:
-      - key: my-nginx-config.conf
-        path: gzip.conf
+      - key: my-nginx-config.conf  # take the contents of this key (the key is a file name)
+        path: gzip.conf            # and expose them in the volume under this file name
diff --git a/Pods.md b/Pods.md
index 622696b..3bd3d2b 100644
--- a/Pods.md
+++ b/Pods.md
@@ -1,5 +1,5 @@
 ## create the pod from your YAML file, use the kubectl create command:
-    kubectl create -f kubia-manual.yaml
+    kubectl create -f kubia-manual.yaml  # kubernetes-training/03 Pods folder has these files
 
 ## Get pod description:
     kubectl get po kubia-zxzij -o yaml
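Both fortune pod manifests above reference a ConfigMap named fortune-config, once through configMapKeyRef (key sleep-interval) and once through a configMap volume with an items list (key my-nginx-config.conf). A minimal sketch of what such a ConfigMap could look like; the values shown are illustrative assumptions, not the repo's actual file:

    apiVersion: v1
    kind: ConfigMap
    metadata:
      name: fortune-config            # the name referenced by configMapKeyRef and the configMap volume
    data:
      sleep-interval: "25"            # consumed as the INTERVAL environment variable (value assumed)
      my-nginx-config.conf: |         # exposed in the volume as gzip.conf via the items list (content assumed)
        server {
          listen 80;
          gzip on;
        }
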
diff --git a/installation.md b/installation.md
index 7f31f8d..130d9f8 100644
--- a/installation.md
+++ b/installation.md
@@ -29,7 +29,7 @@ On both master and slave nodes :
 EOF
 sysctl --system
 
-    setenforce 0
+    setenforce 0  # disable SELinux; not recommended for production
 
 ### install kubelet, kubeadm and kubectl; start kubelet daemon
 ### Do it on both master as welll as worker nodes
@@ -69,8 +69,12 @@ watch system pods
 
 kubectl get pods --all-namespaces
 
-on all the worker nodes do
+on all the worker nodes, do the following to make kubectl work on the worker
 
 mkdir -p $HOME/.kube
 export KUBECONFIG=/etc/kubernetes/kubelet.conf
 
+to run a service, run the following command on the master
+
+    kubectl run nginx --image=nginx --port=80   # if you get a "forbidden" error, exit and run "sudo su -" first
+    kubectl get pods -o wide                    # shows which node each pod was scheduled on
diff --git a/istio-setup.md b/istio-setup.md
new file mode 100644
index 0000000..bb8da4e
--- /dev/null
+++ b/istio-setup.md
@@ -0,0 +1,56 @@
+## Download Istio
+
+    curl -L https://git.io/getLatestIstio | ISTIO_VERSION=1.0.0 sh -
+
+    export PATH="$PATH:/root/istio-1.0.0/bin"
+
+    cd /root/istio-1.0.0
+
+## Create CRDs
+Deploy the extensions by applying crds.yaml:
+
+    kubectl apply -f install/kubernetes/helm/istio/templates/crds.yaml -n istio-system
+
+Wait for some time, then list all the CRDs created for Istio:
+
+    kubectl api-resources | grep -i istio
+
+## Install Istio with default mutual TLS authentication
+This deploys Pilot, Mixer, the Ingress and Egress controllers, and the Istio CA (Certificate Authority).
+
+    kubectl apply -f install/kubernetes/istio-demo-auth.yaml
+
+## Check status
+All the services are deployed as Pods.
+
+    kubectl get pods -n istio-system
+
+## Deploy Sample Application
+
+    kubectl apply -f <(istioctl kube-inject -f samples/bookinfo/platform/kube/bookinfo.yaml)
+
+## Deploy gateway
+
+    kubectl apply -f samples/bookinfo/networking/bookinfo-gateway.yaml
+
+Check status:
+
+    kubectl get pods
+
+## Apply default destination rules
+Before you can use Istio to control the Bookinfo version routing, you need to define the available versions, called subsets, in destination rules.
+
+    kubectl apply -f samples/bookinfo/networking/destination-rule-all-mtls.yaml
+
+## Control Routing
+One of the main features of Istio is its traffic management. As a microservice architecture scales, more advanced service-to-service communication control is required.
+
+## User-Based Testing / Request Routing
+One aspect of traffic management is controlling traffic routing based on the HTTP request, such as user-agent strings, IP addresses, or cookies.
+
+The example below sends all traffic for the user "jason" to reviews:v2, meaning they will only see the black stars.
+
+    cat samples/bookinfo/networking/virtual-service-reviews-test-v2.yaml
+
+As with other Kubernetes configuration, routing rules can be applied using istioctl.
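The sample file virtual-service-reviews-test-v2.yaml is only cat-ed above, not reproduced. The sketch below shows the kind of VirtualService it applies: requests carrying the end-user "jason" go to the v2 subset and everything else to v1. Field values are assumptions based on the description above, not the sample's verbatim contents; the subsets come from the destination rules applied earlier:

    apiVersion: networking.istio.io/v1alpha3
    kind: VirtualService
    metadata:
      name: reviews
    spec:
      hosts:
      - reviews                    # the reviews service of the Bookinfo application
      http:
      - match:
        - headers:
            end-user:
              exact: jason         # route the test user to reviews:v2
        route:
        - destination:
            host: reviews
            subset: v2
      - route:
        - destination:
            host: reviews          # all other traffic stays on reviews:v1
            subset: v1

A rule like this would be applied with istioctl (or kubectl apply), in line with the note above about applying routing rules.
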
diff --git a/networking/calico/calicoctl-setup.md b/networking/calico/calicoctl-setup.md
new file mode 100644
index 0000000..53203d3
--- /dev/null
+++ b/networking/calico/calicoctl-setup.md
@@ -0,0 +1,17 @@
+Launch calicoctl in a pod
+
+    kubectl apply -f https://docs.projectcalico.org/v3.5/getting-started/kubernetes/installation/hosted/calicoctl.yaml
+
+Create an alias so that you can use the calicoctl utility from the host itself
+
+    alias calicoctl="kubectl exec -i -n kube-system calicoctl /calicoctl -- "
+
+
+Try the command below to see if it is working properly
+
+    calicoctl get node
+
+For more help run
+
+    calicoctl --help
+
diff --git a/networking/calico/enforce-kubernetes-network-policy-tutorial.md b/networking/calico/enforce-kubernetes-network-policy-tutorial.md
new file mode 100644
index 0000000..798a436
--- /dev/null
+++ b/networking/calico/enforce-kubernetes-network-policy-tutorial.md
@@ -0,0 +1,3 @@
+For enforcing the stars network policy, refer to this link:
+
+https://docs.projectcalico.org/v2.3/getting-started/kubernetes/tutorials/stars-policy/
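The stars policy tutorial linked above exercises standard Kubernetes NetworkPolicy resources, which Calico enforces. As a minimal illustration of that resource type (the name, namespace, and labels below are assumptions for illustration, not taken from the tutorial), a policy that lets only frontend pods reach backend pods could look like this:

    apiVersion: networking.k8s.io/v1
    kind: NetworkPolicy
    metadata:
      name: allow-from-frontend      # assumed name
      namespace: default             # assumed namespace
    spec:
      podSelector:
        matchLabels:
          role: backend              # the pods this policy protects (assumed label)
      ingress:
      - from:
        - podSelector:
            matchLabels:
              role: frontend         # only pods with this label may connect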