JSON parsing #115

Open · wants to merge 3 commits into base: master
1 change: 1 addition & 0 deletions .gitignore
@@ -8,3 +8,4 @@
 ## Environment normalization:
 /.bundle
 /vendor/bundle
+helm
14 changes: 9 additions & 5 deletions charts/loghouse/templates/fluentd/fluentd-configmap.yaml
@@ -83,6 +83,10 @@ data:
       @type record_modifier
       <record>
         _json_log_ ${ log = record["log"].strip; if log[0].eql?('{') && log[-1].eql?('}'); begin; JSON.parse(log); rescue JSON::ParserError; end; end }
+        _params_select_ ${ record["_json_log_"] ? record["_json_log_"].select{|k, v| k == 'params' && v.is_a?(Hash)} : nil }
+        _params_hash_ ${ !record["_params_select_"].nil? && !record["_params_select_"]["params"].nil? ? record["_params_select_"]["params"] : nil }
+        _params_keys_ ${ !record["_params_hash_"].nil? ? record["_params_hash_"].keys.map{ |key| 'params_' + key } : []}
+        _params_values_ ${ !record["_params_hash_"].nil? ? record["_params_hash_"].values.map(&:to_s) : []}
         timestamp ${time}
         nsec ${record["time"].split('.').last.to_i}
         # static fields
@@ -95,8 +99,8 @@ data:
         # dynamic fields
         labels.names ${record["kubernetes"]["labels"].keys}
         labels.values ${record["kubernetes"]["labels"].values}
-        string_fields.names ${record["_json_log_"] ? record["_json_log_"].select{|_, v| !v.nil? && !v.is_a?(Numeric) && !v.is_a?(TrueClass) && !v.is_a?(FalseClass)}.keys : ["log"]}
-        string_fields.values ${record["_json_log_"] ? record["_json_log_"].select{|_, v| !v.nil? && !v.is_a?(Numeric) && !v.is_a?(TrueClass) && !v.is_a?(FalseClass)}.values.map(&:to_s) : [record["log"]]}
+        string_fields.names ${record["_json_log_"] ? record["_json_log_"].select{|k, v| k != 'params' && !v.nil? && !v.is_a?(Numeric) && !v.is_a?(TrueClass) && !v.is_a?(FalseClass)}.keys + record["_params_keys_"] : ["log"]}
+        string_fields.values ${record["_json_log_"] ? record["_json_log_"].select{|k, v| k != 'params' && !v.nil? && !v.is_a?(Numeric) && !v.is_a?(TrueClass) && !v.is_a?(FalseClass)}.values.map(&:to_s) + record["_params_values_"] : [record["log"]]}

         number_fields.names ${record["_json_log_"] ? record["_json_log_"].select{|_, v| v.is_a?(Numeric)}.keys : []}
         number_fields.values ${record["_json_log_"] ? record["_json_log_"].select{|_, v| v.is_a?(Numeric)}.values : []}
@@ -106,7 +110,7 @@ data:

         null_fields.names ${record["_json_log_"] ? record["_json_log_"].select{|_, v| v.nil?}.keys : []}
       </record>
-      remove_keys kubernetes, docker, master_url, time, log, _json_log_
+      remove_keys kubernetes, docker, master_url, time, log, _json_log_, _params_select_, _params_hash_, _params_keys_, _params_values_
     </filter>

     <filter docker.**>
@@ -137,9 +141,9 @@ data:

     <filter **>
       @type record_modifier
-      whitelist_keys timestamp, nsec, source, namespace, host, pod_name, container_name, stream, labels.names, labels.values, string_fields.names, string_fields.values, number_fields.names, number_fields.values, boolean_fields.names, boolean_fields.values, null_fields.names
+      whitelist_keys timestamp, nsec, source, namespace, host, pod_name, container_name, stream, labels.names, labels.values, string_fields.names, string_fields.values, number_fields.names, number_fields.values, boolean_fields.names, boolean_fields.values, null_fields.names
     </filter>

     <match **>
       @type exec
       command bash /usr/local/bin/insert_ch.sh
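The inline Ruby in the filter above is dense, so here is a minimal standalone sketch of what the new `_params_*` fields and the updated `string_fields` selection compute. The sample record and its keys are invented for illustration; only the transformation logic mirrors the PR.

```ruby
require 'json'

# Assumed sample container log record (illustrative only).
record = {
  "log" => '{"msg":"hello","code":200,"params":{"user":"bob","id":7}}'
}

# _json_log_: parse the line as JSON only if it looks like an object.
log = record["log"].strip
json_log =
  if log[0].eql?('{') && log[-1].eql?('}')
    begin
      JSON.parse(log)
    rescue JSON::ParserError
      nil # non-JSON lines fall back to a plain "log" string field
    end
  end

# _params_select_ / _params_hash_: pick out a nested "params" hash, if any.
params_select = json_log ? json_log.select { |k, v| k == 'params' && v.is_a?(Hash) } : nil
params_hash   = params_select && !params_select["params"].nil? ? params_select["params"] : nil

# _params_keys_ / _params_values_: flatten it into parallel params_* arrays.
params_keys   = params_hash ? params_hash.keys.map { |key| 'params_' + key } : []
params_values = params_hash ? params_hash.values.map(&:to_s) : []

# string_fields: non-nil, non-numeric, non-boolean values, now with
# "params" excluded and its flattened params_* entries appended instead.
stringish = ->(v) { !v.nil? && !v.is_a?(Numeric) && !v.is_a?(TrueClass) && !v.is_a?(FalseClass) }
names  = json_log.select { |k, v| k != 'params' && stringish.call(v) }.keys + params_keys
values = json_log.select { |k, v| k != 'params' && stringish.call(v) }.values.map(&:to_s) + params_values

p names   #=> ["msg", "params_user", "params_id"]
p values  #=> ["hello", "bob", "7"]
```

If `params` is missing or is not a hash, `params_keys` and `params_values` stay empty and the output matches the previous behavior; `code` still lands in `number_fields` because the numeric branch is unchanged.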
7 changes: 5 additions & 2 deletions charts/loghouse/templates/fluentd/fluentd.yaml
@@ -12,16 +12,19 @@ spec:
   metadata:
     labels:
       k8s-app: fluentd
-      kubernetes.io/cluster-service: "true"
+      # kubernetes.io/cluster-service: "true"
     annotations:
-      scheduler.alpha.kubernetes.io/critical-pod: ''
+      # scheduler.alpha.kubernetes.io/critical-pod: ''
       checksum/config: {{ include (print $.Template.BasePath "/fluentd/fluentd-configmap.yaml") . | sha256sum }}
   spec:
     serviceAccountName: fluentd
     containers:
     - name: fluentd
       image: flant/loghouse-fluentd:{{ template "app.version" $ }}
       imagePullPolicy: {{ .Values.imagePullPolicy }}
+      env:
+      - name: TD_AGENT_OPTIONS
+        value: -vv
       - name: FLUENTD_ARGS
         value: --no-supervisor -q
       - name: CLICKHOUSE_SERVER
17 changes: 10 additions & 7 deletions charts/loghouse/values.yaml
@@ -5,28 +5,31 @@
 # result: YWRtaW46JGFwcjEkMzdxSEwvTVIkcEFvdzEzZDUwMkd5VFc2VDNlQmJiMAoK
 auth: YWRtaW46JGFwcjEkelhESkU5YTkkRkU0OFdnZlBMZlJJQjk0bVhXZVprMAoK

+app:
+  version: 0.2.2
+
 # Password for default user in clickhouse
 clickhouse_pass_original: password

 # Settings for ingress
 ingress:
   enable: true
-  enable_https: true
+  enable_https: false
   clickhouse:
     host: clickhouse.domain.com
     path: "/"
     tls_secret_name: clickhouse
   loghouse:
-    host: loghouse.domain.com
+    host: loghouse.dev1.cgdlp.io
     path: "/"
     tls_secret_name: loghouse
   tabix:
     host: tabix.domain.com
     path: "/"
     tls_secret_name: loghouse
-# annotations:
-# - 'kubernetes.io/ingress.class: traefik'
-# - 'traefik.frontend.passHostHeader: "true"'
+annotations:
+- 'kubernetes.io/ingress.class: traefik'
+- 'traefik.frontend.passHostHeader: "true"'

 # Enable tabix
 enable_tabix: false
@@ -63,7 +66,7 @@ partition_period: 1
 # effect: 'NoSchedule'

 # If you do not want to install fluentd on the master node
-install_master: false
+install_master: true

 imagePullPolicy: Always

@@ -90,7 +93,7 @@ fluentd:
       cpu: 1
       memory: 512Mi
     requests:
-      cpu: 0.5
+      cpu: 0.1
       memory: 256Mi
 loghouse:
   resources: