# Minimal image for the kubia sample app: Node.js 7 base, app.js copied in,
# started directly as PID 1 via exec-form ENTRYPOINT.
FROM node:7
ADD app.js /app.js
ENTRYPOINT ["node", "app.js"]
$ docker exec -it kubia-container bash
$ kubectl get po kubia-zxzij -o yaml kubectl explain $ kubectl create -f kubia-manual.yaml $ kubectl logs kubia-manual -c kubia $ kubectl get po -l creation_method=manual $ kubectl get po -l '!env' $ kubectl delete all --all $ kubectl logs mypod --previous $ kubectl label pod kubia-dmdck app=foo --overwrite Note: The double dash (--) separates the arguments you want to pass to the command from the kubectl arguments. kubectl 에서 넘기고싶은걸 -- 로 구분할수 있다는 거임. - 김준석 $ kubectl exec kubia-3inly env
# GCE-backed PersistentVolume for the MongoDB example.
apiVersion: v1
kind: PersistentVolume
metadata:
  name: mongodb-pv
spec:
  capacity:
    storage: 1Gi
  accessModes:
    - ReadWriteOnce
    - ReadOnlyMany                  # was "ReadOnlymany" — access modes are camelCase
  persistentVolumeReclaimPolicy: Retain
  gcePersistentDisk:                # was "gcePersistentDisk :" — no space before ":"
    pdName: mongodb
    fsType: ext4
# Claim that binds to the mongodb-pv PersistentVolume above.
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: mongodb-pvc
spec:
  resources:
    requests:
      storage: 1Gi
  accessModes:
    - ReadWriteOnce
  storageClassName: ""              # was "storageClassname"; "" disables dynamic provisioning
kubectl get pvc
..
# Pod spec fragment: mount the PVC-backed volume into the container.
volumeMounts:
  - name: mongodb-data
    mountPath: /data/db             # was "mountpath" — camelCase
..
volumes:
  - name: mongodb-data
    persistentVolumeClaim:
      claimName: mongodb-pvc
1 ? Ssl 0:00 node app.js
1 ? ss 0:00 /bin/sh -c node app.js
7 ? sl 0:00 node app.js
| 도커 | 쿠버네티스 | 설명 |
| ENTRYPOINT | command | 컨테이너 안에서 실행되는 실행파일 |
| CMD | args | 실행파일에 전달되는 인자 |
kubectl create configmap fortune-config --from-literal=sleep-interval=25
# Container fragment: INTERVAL env var sourced from the fortune-config ConfigMap.
- image: luksa/fortune:env
  env:
    - name: INTERVAL
      valueFrom:
        configMapKeyRef:
          name: fortune-config
          key: sleep-interval
kubectl create configmap my-config --from-file=config-file.conf args: ["$(INTERVAL)"]
# Mount a whole ConfigMap as files under /etc/nginx/conf.d.
volumeMounts:
  ...
  - name: config
    mountPath: /etc/nginx/conf.d
    readOnly: true
  ...
volumes:
  - name: config
    configMap:
      name: fortune-config
...
# Same ConfigMap volume, but exposing only one key under a different file name.
volumes:
  - name: config
    configMap:
      name: fortune-config
      items:
        - key: my-nginx-config.conf
          path: gzip.conf
...
# Mount the fortune-https Secret as files under /etc/nginx/certs/.
- name: certs
  mountPath: /etc/nginx/certs/
  readOnly: true
...
- name: certs
  secret:
    secretName: fortune-https       # was "secretname" — camelCase
# Expose a single Secret key as an environment variable.
env:
  - name: FOO_SECRET
    valueFrom:
      secretKeyRef:
        name: fortune-https
        key: foo
$ kubectl create secret docker-registry mydockerhubsecret \
--docker-username=myusername --docker-password=mypassword \
--docker-email=my.email@provider.com
# Pod spec fragment: pull images using the docker-registry Secret created above.
imagePullSecrets:                   # was misspelled "imagePullScrets"
  - name: mydockerhubsecret
# Downward API: expose pod metadata/status fields as environment variables.
env:
  - name: POD_NAME
    valueFrom:
      fieldRef:
        fieldPath: metadata.name
  ...
  - name: POD_IP
    valueFrom:
      fieldRef:
        fieldPath: status.podIP
  ...
...
# Downward API as a volume: pod name exposed as the file /etc/downward/podName.
volumeMounts:
  - name: downward
    mountPath: /etc/downward
volumes:
  - name: downward                  # was "downard" — must match the volumeMount name above
    downwardAPI:
      items:
        - path: "podName"
          fieldRef:
            fieldPath: metadata.name
...
kubectl cluster-info를 실행해 URL을 얻을수 있다. $ kubectl proxy and $ curl localhost:8001 $ curl http://localhost:8001/apis/batch/v1 $ curl http://localhost:8001/apis/batch/v1/jobs https://kubernetes / -k(--insecure) / 인증서 및 토큰 필요 - 김준석 $ kubectl rolling-update kubia-v1 kubia-v2 --image=luksa/kubia:v2
# Deployment of 3 kubia:v1 replicas.
apiVersion: apps/v1beta1            # NOTE(review): deprecated; apps/v1 also requires spec.selector
kind: Deployment
metadata:
  name: kubia
spec:
  replicas: 3
  template:
    metadata:
      name: kubia
      labels:
        app: kubia
    spec:
      containers:
        - image: luksa/kubia:v1
          name: nodejs
$ kubectl rollout status deployment kubia $ kubectl patch deployment kubia -p '{"spec": {"minReadySeconds": 10}}' $ kubectl set image deployment kubia nodejs=luksa/kubia:v2 $ kubectl rollout status deployment kubia $ kubectl rollout pause deployment kubia $ kubectl rollout resume deployment kubia etcd 분산 저장 스토리지 API 서버 스케쥴러 컨트롤 매니저
Kubelet Kube-proxy 컨테이너 런타임(Docker, rkt 외 기타)
쿠버네티스 DNS 서버 대시보드 인그레스 컨트롤러 힙스터 컨테이너 네트워크 인터페이스 플러그인
$ etcdctl ls /registry $ kubectl get events --watch $ kubectl get sa $ kubectl create serviceaccount foo
# ServiceAccount fragment: registry credentials attached to the account.
imagePullSecrets:                   # was "imagepullSecrets" — camelCase
  - name: my-dockerhub-secret
# Run the pod under the "foo" ServiceAccount.
spec:                               # was "spec :" — no space before ":"
  serviceAccountName: foo
# Pod uses the node's network namespace directly.
spec:
  hostNetwork: true
# Bind container port 8080 to port 9000 on the node itself.
spec:
  containers:
    ..
    ports:
      - containerPort: 8080
        hostPort: 9000
        protocol: TCP
# Share the node's PID and IPC namespaces with the pod.
spec:
  hostPID: true
  hostIPC: true
$ kubectl exec pod-with-defaults id uid=0(root) gid=0(root) ...
# Run the container's process as UID 405 instead of root.
spec:                               # was "spec :" — no space before ":"
  containers:
    ..
    securityContext:
      runAsUser: 405
# Refuse to start the container if its image would run as root.
securityContext:
  runAsNonRoot: true
# Privileged container: full access to the node's devices.
securityContext:
  privileged: true
# Grant only the specific kernel capability needed (set system time).
securityContext:
  capabilities:
    add:
      - SYS_TIME
# Drop the ability to change file ownership.
securityContext:
  capabilities:
    drop:
      - CHOWN                       # was "CHown" — capability names are all-uppercase
# Root filesystem is read-only, but mounted volumes remain writable.
securityContext:
  readOnlyRootFilesystem: true      # was "readonlyRootFilesystem" — camelCase
volumeMounts:
  - name: my-volume
    mountPath: /volume              # was "mountpath" — camelCase
    readOnly: false
# Group ownership for volumes (fsGroup) plus extra supplemental groups.
securityContext:
  fsGroup: 555
  supplementalGroups: [666, 777]
$ kubectl create clusterrole psp-default --verb=use --resource=podsecuritypolicies --resource-name=default $ kubectl create clusterrole psp-privileged --verb=use --resource=podsecuritypolicies --resource-name=privileged
$ kubectl create clusterrolebinding psp-all-users --clusterrole=psp-default --group=system:authenticated $ kubectl create clusterrolebinding psp-bob --clusterrole=psp-privileged --user=bob
# Default-deny NetworkPolicy: the empty podSelector matches every pod in the
# namespace, and with no ingress rules all incoming traffic is denied.
apiVersion: networking.k8s.io/v1
kind: NetworkPolicy
metadata:
  name: default-deny
spec:
  podSelector: {}                   # explicit empty map; a bare "podSelector:" parses as null
# Allow only webserver pods to reach database pods on port 5432.
apiVersion: networking.k8s.io/v1
kind: NetworkPolicy
metadata:
  name: postgres-netpolicy
spec:
  podSelector:
    matchLabels:
      app: database
  ingress:
    - from:
        - podSelector:
            matchLabels:
              app: webserver
      ports:
        - port: 5432
# Ingress rule: allow port 80 from any pod in namespaces labeled tenant=manning.
ingress:
  - from:
      - namespaceSelector:
          matchLabels:
            tenant: manning
    ports:
      - port: 80
# Ingress rule: allow traffic from the given CIDR range.
ingress:
  - from:
      - ipBlock:                    # was "inBlock" — the field is ipBlock
          cidr: 192.168.1.0/24
# Egress rule: only allow outbound traffic to database pods.
egress:
  - to:
      - podSelector:
          matchLabels:
            app: database
$ kubectl exec -it requrests-pod top | CPU 요청 대 제한 | 메모리 요청대 제한 | 컨테이너 QoS 리소스 |
| 미설정 | 미설정 | BestEffort |
| 미설정 | 요청 < 제한 | Burstable |
| 미설정 | 요청 = 제한 | Burstable |
| 요청 < 제한 | 미설정 | Burstable |
| 요청 < 제한 | 요청 < 제한 | Burstable |
| 요청 < 제한 | 요청 = 제한 | Burstable |
| 요청 = 제한 | 요청 = 제한 | Guaranteed |
$ kubectl autoscale deployment kubia --cpu-percent=30 --min=1 --max=5 // cpu 사용률 30% 유지 최소 1 최대 5
$ kubectl get hpa
cloud provider라는 component 에게 위임합니다. 잘 알려진 제공자(GCP, AWS)가 아닌경우 cloud provider 를 직접 구현해야 해요 - bluemir $ kubectl cordon &lt;node&gt; // 명령은 노드를 스케줄링할 수 없음으로 표시함 $ kubectl drain &lt;node&gt; // 명령은 노드를 스케줄링 할 수 없음으로 표시하고 노드에서 실행 중인 모든 파드를 종료한다.
node-role.kubernetes.io/master <key> null <value> 효과는 NoSchedule
$ kubectl taint node node1.k8s node-type=production:NoSchedule
# Tolerate the node-type=production:NoSchedule taint so the pod can land there.
spec:
  tolerations:
    - key: node-type
      operator: Equal               # was "Operator" — field names are lowercase camelCase
      value: production
      effect: NoSchedule
# Pod that must be scheduled onto a node labeled gpu=true (hard node affinity).
apiVersion: v1
kind: Pod
metadata:
  name: kubia-gpu
spec:
  affinity:
    nodeAffinity:
      requiredDuringSchedulingIgnoredDuringExecution:
        nodeSelectorTerms:
          - matchExpressions:
              - key: gpu
                operator: In
                values:
                  - "true"          # quoted — a bare true would parse as a boolean
  containers:
    - image: luksa/kubia
      name: kubia
# Preferred (soft) node affinity: prefer zone1 (weight 80) over dedicated
# share-type nodes (weight 20).
apiVersion: extensions/v1beta1
kind: Deployment
metadata:
  name: pref
spec:
  replicas: 5
  template:
    metadata:
      labels:
        app: pref
    spec:
      affinity:
        nodeAffinity:
          preferredDuringSchedulingIgnoredDuringExecution:
            - weight: 80
              preference:
                matchExpressions:
                  - key: availability-zone
                    operator: In
                    values:
                      - zone1
            - weight: 20
              preference:
                matchExpressions:
                  - key: share-type
                    operator: In
                    values:
                      - dedicated
...
# Hard pod anti-affinity: no two frontend replicas on the same node (hostname key).
apiVersion: extensions/v1beta1
kind: Deployment
metadata:
  name: frontend
spec:
  replicas: 5
  template:
    metadata:
      labels:
        app: frontend
    spec:
      affinity:
        podAntiAffinity:
          requiredDuringSchedulingIgnoredDuringExecution:
            - topologyKey: kubernetes.io/hostname
              labelSelector:
                matchLabels:
                  app: frontend
# Soft pod affinity: prefer scheduling frontend pods on the same node as backend pods.
apiVersion: extensions/v1beta1
kind: Deployment
metadata:
  name: frontend
spec:
  replicas: 5
  template:
    metadata:
      labels:
        app: frontend
    spec:
      affinity:
        podAffinity:
          preferredDuringSchedulingIgnoredDuringExecution:
            - weight: 80
              podAffinityTerm:
                topologyKey: kubernetes.io/hostname
                labelSelector:
                  matchLabels:
                    app: backend
# Hard pod anti-affinity example (same as above): spread frontend replicas
# across distinct nodes.
apiVersion: extensions/v1beta1      # was "piVersion" — typo
kind: Deployment
metadata:
  name: frontend
spec:
  replicas: 5
  template:
    metadata:
      labels:
        app: frontend
    spec:
      affinity:
        podAntiAffinity:
          requiredDuringSchedulingIgnoredDuringExecution:
            - topologyKey: kubernetes.io/hostname
              labelSelector:
                matchLabels:
                  app: frontend
...