Set up the EFK stack (Elasticsearch, Fluentd, Kibana) in Kubernetes:
service namespace
apiVersion: v1
kind: Namespace
metadata:
  name: core-service
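To apply it, save the manifest to a file (namespace.yaml is just an illustrative name) and create it with kubectl:

# Create the namespace first so the fluentd resources below have a home
kubectl apply -f namespace.yaml

# Confirm it exists
kubectl get namespace core-service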
elasticsearch
apiVersion: apps/v1
kind: Deployment
metadata:
  name: elasticsearch
  labels:
    component: elasticsearch
spec:
  selector:
    matchLabels:
      component: elasticsearch
  template:
    metadata:
      labels:
        component: elasticsearch
    spec:
      containers:
      - name: elasticsearch
        image: docker.elastic.co/elasticsearch/elasticsearch:7.4.1
        env:
        - name: discovery.type
          value: single-node
        ports:
        - containerPort: 9200
          name: http
          protocol: TCP
        resources:
          limits:
            cpu: 500m
            memory: 4Gi
          requests:
            cpu: 500m
            memory: 4Gi
---
apiVersion: v1
kind: Service
metadata:
  name: elasticsearch
  labels:
    service: elasticsearch
spec:
  type: NodePort
  selector:
    component: elasticsearch
  ports:
  - name: es
    port: 9200
    targetPort: 9200
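Before wiring in fluentd, it's worth checking that Elasticsearch answers. A quick sketch, assuming the manifests above were applied to the default namespace:

# Tunnel local port 9200 to the elasticsearch service
kubectl port-forward svc/elasticsearch 9200:9200 &

# A healthy single-node cluster reports a status of green
# (or yellow if replica shards are unassigned)
curl 'http://localhost:9200/_cluster/health?pretty'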
fluentd and its RBAC roles
apiVersion: apps/v1
kind: DaemonSet
metadata:
  name: fluentd
  namespace: core-service
  labels:
    k8s-app: fluentd-logging
    version: v1
    kubernetes.io/cluster-service: "true"
spec:
  selector:
    matchLabels:
      k8s-app: fluentd-logging
  template:
    metadata:
      labels:
        k8s-app: fluentd-logging
        version: v1
        kubernetes.io/cluster-service: "true"
    spec:
      serviceAccountName: fluentd
      tolerations:
      - key: node-role.kubernetes.io/master
        effect: NoSchedule
      containers:
      - name: fluentd
        image: fluent/fluentd-kubernetes-daemonset:v1.3-debian-elasticsearch
        env:
        - name: FLUENT_ELASTICSEARCH_HOST
          value: "elasticsearch.default.svc"
        - name: FLUENT_ELASTICSEARCH_PORT
          value: "9200"
        - name: FLUENT_ELASTICSEARCH_SCHEME
          value: "http"
        - name: FLUENT_UID
          value: "0"
        resources:
          limits:
            memory: 200Mi
          requests:
            cpu: 100m
            memory: 200Mi
        volumeMounts:
        - name: varlog
          mountPath: /var/log
        - name: varlibdockercontainers
          mountPath: /var/lib/docker/containers
          readOnly: true
      terminationGracePeriodSeconds: 30
      volumes:
      - name: varlog
        hostPath:
          path: /var/log
      - name: varlibdockercontainers
        hostPath:
          path: /var/lib/docker/containers
---
apiVersion: v1
kind: ServiceAccount
metadata:
  name: fluentd
  namespace: core-service
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
  name: fluentd
rules:
- apiGroups:
  - ""
  resources:
  - pods
  - namespaces
  verbs:
  - get
  - list
  - watch
---
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: fluentd
roleRef:
  kind: ClusterRole
  name: fluentd
  apiGroup: rbac.authorization.k8s.io
subjects:
- kind: ServiceAccount
  name: fluentd
  namespace: core-service
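Since a DaemonSet schedules one pod per node (and the toleration above includes masters), a quick sanity check is that the fluentd pod count matches the node count and that the startup log shows a connection to Elasticsearch:

# Expect one Running fluentd pod per node
kubectl get pods -n core-service -l k8s-app=fluentd-logging -o wide

# The startup log should mention connecting to elasticsearch.default.svc:9200
kubectl logs -n core-service daemonset/fluentd | head -n 50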
kibana
apiVersion: apps/v1
kind: Deployment
metadata:
  name: kibana
spec:
  selector:
    matchLabels:
      run: kibana
  template:
    metadata:
      labels:
        run: kibana
    spec:
      containers:
      - name: kibana
        image: docker.elastic.co/kibana/kibana:7.4.1
        env:
        - name: ELASTICSEARCH_HOSTS
          value: http://elasticsearch:9200
        - name: XPACK_SECURITY_ENABLED
          value: "true"
        ports:
        - containerPort: 5601
          name: http
          protocol: TCP
---
apiVersion: v1
kind: Service
metadata:
  name: kibana
  labels:
    service: kibana
spec:
  type: NodePort
  selector:
    run: kibana
  ports:
  - name: kibana
    port: 5601
    targetPort: 5601
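Because the service is a NodePort, Kibana is reachable on whichever port Kubernetes assigns; port-forwarding is a handy alternative for a quick look:

# Find the node port assigned to kibana
kubectl get svc kibana

# Or tunnel to it directly and open http://localhost:5601
kubectl port-forward svc/kibana 5601:5601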
The connecting part between the components is the Elasticsearch URL:
elasticsearch.<namespace>.svc:9200
if connecting across namespaces (here, fluentd in core-service reaches Elasticsearch in default)
elasticsearch:9200
if the client is in the same namespace (as Kibana is)
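One way to confirm the DNS wiring from fluentd's side is a throwaway pod in core-service, a sketch assuming a pullable busybox image:

# The cross-namespace name resolves from core-service; the short name elasticsearch would not
kubectl run dns-test -n core-service --rm -it --image=busybox --restart=Never \
  -- nslookup elasticsearch.default.svc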
Even though the Stack Monitoring page in Kibana expects Filebeat, Fluentd actually works well with Elasticsearch to pump in the logs.
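The fluentd-kubernetes-daemonset image writes to daily logstash-* indices by default (governed by its FLUENT_ELASTICSEARCH_LOGSTASH_FORMAT setting), so the proof that logs are flowing is those indices appearing in Elasticsearch:

# With the earlier port-forward still running, list indices;
# logstash-YYYY.MM.DD entries indicate fluentd is shipping logs
curl 'http://localhost:9200/_cat/indices?v'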