Sample Elasticsearch Filebeat Kibana
Example of using converge to provision a Vagrant machine with Elasticsearch, Kibana, and Filebeat. It depends on a converge build that includes pull requests 215 and 216 (see the comments at the top of the HCL below). The Vagrantfile:
# -*- mode: ruby -*-
# vi: set ft=ruby :
Vagrant.configure("2") do |config|
  config.vm.box = "centos/7"
  config.vm.synced_folder ".", "/vagrant", type: "rsync", rsync__exclude: ".git/"
  config.vm.network "forwarded_port", guest: 5601, host: 5601

  # change the source to a linux_amd64 converge binary (see the build sketch below)
  config.vm.provision "file", source: "/Users/ryan/Projects/golang/src/github.com/asteris-llc/converge/build/converge_0.1.1_linux_amd64/converge", destination: "converge"
  config.vm.provision "shell", inline: "mv converge /usr/local/bin; sudo /usr/local/bin/converge apply --local --log-level=info /vagrant/elk.hcl"
end
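The file provisioner above expects a converge binary cross-compiled for linux_amd64. converge is written in Go, so a suitable binary can be built roughly like this (a sketch only, assuming a standard Go toolchain and a checkout of the repository; the project's own Makefile may produce the build/ artifacts differently):

# cross-compile converge for the CentOS 7 guest from a non-Linux workstation
cd $GOPATH/src/github.com/asteris-llc/converge
GOOS=linux GOARCH=amd64 go build -o build/converge .

Then point the file provisioner's source at whatever path your build actually produces.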
After running vagrant up, you should have a working Kibana instance backed by Elasticsearch. Filebeat is installed inside the Vagrant machine and is configured to send logs to Elasticsearch.
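To verify the stack (a quick sketch; these exact commands are not part of the original walkthrough), check Elasticsearch from inside the VM and Kibana through the forwarded port:

# Elasticsearch is bound to 127.0.0.1 inside the VM, so query it over vagrant ssh
vagrant ssh -c "curl -s http://localhost:9200"

# once Filebeat has shipped some logs, filebeat-* indices should start to appear
vagrant ssh -c "curl -s 'http://localhost:9200/_cat/indices?v'"

# Kibana is forwarded to the host, so browse to http://localhost:5601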
This is the HCL (elk.hcl, referenced by the shell provisioner above) that I am using. It is commented with notes, questions, and issues I ran into while trying to build this.
# For this to run successfully, you need a version of converge compiled with the following two pull requests merged in:
# https://github.com/asteris-llc/converge/pull/215
# https://github.com/asteris-llc/converge/pull/216
# I would like to split this into multiple files
param "docker-package" {
default = "docker-engine"
}
param "docker-service" {
default = "docker"
}
param "docker-group" {
default = "docker"
}
param "user-name" {
default = "vagrant"
}
param "elasticsearch-data-directory" {
default = "/data/elasticsearch"
}
param "filebeat-service" {
default = "filebeat"
}
task "filebeat-install" {
check = "yum list installed filebeat"
apply = "rpm -ivh https://download.elastic.co/beats/filebeat/filebeat-1.3.0-x86_64.rpm"
depends=["task.docker-install"]
}
# task names cannot accept "."s. I originally tried to name this "filebeat.yml". we should document what is a valid task name
file.content "filebeat-yml" {
destination = "/etc/filebeat/filebeat.yml"
# file.content should support loading external templates. inline can get unwieldy pretty quickly. The sample version of the config file that filebeat ships is 426 lines.
content = <<EOF
filebeat:
prospectors:
- paths:
- /var/log/*.log
- /var/log/messages
input_type: log
registry_file: /var/lib/filebeat/registry
output:
elasticsearch:
hosts: ["localhost:9200"]
EOF
depends = ["task.filebeat-install"]
}
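# (side note, not from the original: after an apply, the config above can be sanity
#  checked with "filebeat -configtest -c /etc/filebeat/filebeat.yml", assuming that
#  flag is available in this filebeat 1.x release)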
task "filebeat-enable" {
check = "systemctl is-enabled {{param `filebeat-service`}}"
apply = "systemctl enable {{param `filebeat-service`}}"
depends = ["file.content.filebeat-yml"]
}
# this will usually fail on the first run b/c elasticsearch is not yet ready
# when this task runs. a subsequent apply will usually make it work but we may
# need some kind of watcher / poller / healthcheck / wait_for module. or are there
# are any other suggestions on how to handle something like this?
task "filebeat-elasticsearch-template" {
check = "[[ \"$(curl 'http://localhost:9200/_template/filebeat' 2>/dev/null)\" != \"{}\" ]]"
apply = "curl -XPUT 'http://localhost:9200/_template/filebeat' -d@/etc/filebeat/filebeat.template.json 2>/dev/null"
depends = ["task.filebeat-enable", "docker.container.elasticsearch-container"]
}
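# one possible interim workaround (just a sketch, not what this file does today):
# make the apply itself wait for elasticsearch before uploading the template, e.g.
#
#   for i in $(seq 1 30); do
#     curl -sf http://localhost:9200 >/dev/null && break
#     sleep 2
#   done
#   curl -XPUT 'http://localhost:9200/_template/filebeat' -d@/etc/filebeat/filebeat.template.json
#
# a proper wait_for / healthcheck module would make this unnecessary.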
task "filebeat-start" {
check = "systemctl is-active {{param `filebeat-service`}}"
apply = "systemctl start {{param `filebeat-service`}}"
depends = ["task.filebeat-enable", "docker.container.elasticsearch-container"]
}
file.content "docker-repo" {
destination = "/etc/yum.repos.d/docker.repo"
content = <<EOF
[dockerrepo]
name=Docker Repository
baseurl=https://yum.dockerproject.org/repo/main/centos/7/
enabled=1
gpgcheck=1
gpgkey=https://yum.dockerproject.org/gpg
EOF
}
# yum / rpm shell tasks are vulnerable to conflicts (due to yum locks). You will
# see a lot of this in the output:
#   ...
#   Existing lock /var/run/yum.pid: another copy is running as pid 11771.
#   Another app is currently holding the yum lock; waiting for it to exit...
#   ...
# yum is smart enough to wait for each individual install to complete and
# converge (so far) eventually is able to install all packages without
# erroring out. But, when we build a package module, we'll probably want to
# handle this so it is more efficient (batch up yum installs in a single
# transaction?). should this (module-specific implicit dependencies/batching) be
# part of the core graph engine?
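# (for illustration only: yum already accepts multiple packages in one transaction,
#  e.g. "yum install -y docker-engine filebeat", so batching would also sidestep the
#  lock contention above; this file does not do that today)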
task "docker-install" {
check = "yum list installed {{param `docker-package`}}"
# I'd like to be able to run yum update -y only if file.content.docker-repo has changed.
apply = "yum update -y; yum install -y {{param `docker-package`}}"
depends = ["file.content.docker-repo"]
}
task "docker-user-group" {
check = "groups {{param `user-name`}} | grep -i {{param `docker-group`}}"
apply = "usermod -aG {{param `docker-group`}} {{param `user-name`}}"
depends = ["task.docker-install"]
}
task "docker-enable" {
check = "systemctl is-enabled {{param `docker-service`}}"
apply = "systemctl enable {{param `docker-service`}}"
depends = ["task.docker-user-group"]
}
task "docker-start" {
check = "systemctl is-active {{param `docker-service`}}"
apply = "systemctl start {{param `docker-service`}}"
depends = ["task.docker-enable"]
}
task "elasticsearch-data-directory" {
check = "test -d {{param `elasticsearch-data-directory`}}"
apply = "mkdir -p {{param `elasticsearch-data-directory`}}"
}
# without the changes in https://github.com/asteris-llc/converge/pull/216, the
# following tasks will always fail on the first run. Well, the image tasks fail
# silently and the container tasks return errors. we might need a generic way to
# handle a situation where we expect a Check to return an error but we still want
# the tasks to run during Apply.
docker.image "elasticsearch-image" {
name = "elasticsearch"
tag = "2.4.0"
depends = ["task.docker-start"]
}
docker.container "elasticsearch-container" {
name = "elasticsearch"
image = "elasticsearch:2.4.0"
command = ["elasticsearch", "-Des.insecure.allow.root=true"]
ports = ["127.0.0.1:9200:9200"]
volumes = ["{{param `elasticsearch-data-directory`}}:/usr/share/elasticsearch/data"]
force = "true"
depends = ["task.elasticsearch-data-directory", "docker.image.elasticsearch-image"]
}
docker.image "kibana-image" {
name = "kibana"
tag = "4.6.0"
depends = ["task.docker-start"]
}
docker.container "kibana-container" {
name = "kibana"
image = "kibana:4.6.0"
ports = ["5601:5601"]
links = ["elasticsearch:elasticsearch"]
force = "true"
depends = ["docker.image.kibana-image", "docker.container.elasticsearch-container"]
}
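As noted in the comments above, task.filebeat-elasticsearch-template tends to fail on the first pass because Elasticsearch is not yet accepting connections, so a second apply inside the VM usually converges everything. A rough sketch of the follow-up steps (these commands are assumed, not part of the original page):

vagrant ssh
sudo /usr/local/bin/converge apply --local --log-level=info /vagrant/elk.hcl

# sanity checks once the second apply finishes
docker ps                                          # the elasticsearch and kibana containers should be running
curl -s http://localhost:9200/_template/filebeat   # should return the loaded template rather than {}
systemctl status filebeat                          # filebeat should be active and shipping /var/log/*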