Fluentd is not picking up new log files and the POS file is not getting updated

Asked: 2021-02-02 06:35:22

Tags: ruby fluentd

I'm running into a situation where Fluentd, running as a daemon in a Kubernetes cluster, does not pick up new log files; this happens randomly. Sometimes restarting Fluentd works. My configuration is below. I also don't see the app.log.pos file being updated (a small sketch for inspecting the pos file follows the configuration). I'd appreciate any help.

My environment

  • Fluentd Docker image version: fluentd:v1.12.0-debian-1.0

  • Host OS: Red Hat Enterprise Linux 7.9

  • Host kernel version: 3.10.0-1160.6.1.el7.x86_64

My configuration

<source>
  @type tail
  path /data/logs/*/app/*.log
  pos_file /data/logs/app.log.pos
  path_key tailed_path
  tag ms-logs-application
  read_from_head true
  follow_inodes true
  refresh_interval 20s
  enable_stat_watcher false
  <parse>
    @type none
  </parse>
  time_format %Y-%m-%dT%H:%M:%S.%NZ      
</source>
<filter ms-logs-application>
  @type concat
  key message
  multiline_start_regexp /\d{4}-\d{1,2}-\d{1,2}/
  flush_interval 10
  timeout_label @NORMAL
</filter>
<match ms-logs-application>
  @type relabel
  num_threads 8
  @label @NORMAL
</match>
<label @NORMAL>
  <filter ms-logs-application>
    @type parser
    key_name message
    reserve_data true
    <parse>
      @type grok
    grok_failure_key grokfailure
    <grok>
      pattern (?<message>[^\]]+ (?<timestamp>%{HOUR}:%{MINUTE}:%{SECOND}.%{NONNEGINT})\|\[(?<thread>[^\]]+)\]\|%{IPORHOST:pod_instance}\|(?<severity>([Aa]lert|ALERT|[Tt]race|TRACE|[Dd]ebug|DEBUG|[Nn]otice|NOTICE|[Ii]nfo?(?:rmation)?| INFO?(?:RMATION)?|[Ww]arn?(?:ing)?|WARN?(?:ING)?|[Ee]rr?(?:or)?|ERR?(?:OR)?|[Cc]rit?(?:ical)?|CRIT?(?:ICAL)?|[Ff]atal|FATAL|[Ss]evere|SEVERE|EMERG(?:ENCY)?|[Ee]merg(?:ency)?))\|%{GREEDYDATA:log_type}\|%{GREEDYDATA:application}\|%{GREEDYDATA:microservice}\|%{UUID:uuid}\|(?<message_type>[^\]]+)\|(?<fullmessage>(.|\r|\n)*))
      </grok>       
    </parse>
  </filter>  
  
  <filter ms-logs-application>
    @type record_transformer
    remove_keys fullmessage
    enable_ruby
    <record>
      host.name ${hostname}
      remote_ip "#{(Socket.ip_address_list.detect do |intf| intf.ipv4_private? end).ip_address}"
      log.file.path "${record['tailed_path']}"
    #remote_ip "%#{@metadata.ip_address}"
    </record>
  </filter>
  
  <match ms-logs-application>
    @type rewrite_tag_filter
    num_threads 8
    <rule>
      key grokfailure
      pattern /.*/
      tag grokfailure_log_app
    </rule>
    <rule>
      key application
      pattern /.*/
      tag ms-logs-app-matched
    </rule>     
  </match>
  
  <match ms-logs-app-matched>
    @type elasticsearch_dynamic
    num_threads 8
    @log_level info
    host <IP>
    suppress_type_name true
    include_tag_key true
    reload_connections true
    #port 9200
    logstash_format true
    #index_name fluentd.${tag}.%Y%m%d
    
    #%{application}-%{+YYYY.MM.dd}
    logstash_prefix myapp-application-${record['application']}
    <buffer>
       @type file
       path /data/logs/*/app/*.log
       flush_mode interval
       retry_type exponential_backoff
       flush_thread_count 8
       flush_interval 5s
       retry_forever true
       retry_max_interval 30
       chunk_limit_size 2M
       queue_limit_length 32
       overflow_action throw_exception
    </buffer>
  </match>  
  
  <match grokfailure_log_app>
    @type elasticsearch_dynamic
    num_threads 8
    @log_level info
    suppress_type_name true
    include_tag_key true
    reload_connections true
    hosts <ip>
    #port 9200
    logstash_format true
    #%{application}-%{+YYYY.MM.dd}
    logstash_prefix app-nonematch
    #type_name fluentd.${tag}.%Y%m%d
  </match>    
</label>   
<filter ms-logs-application>
  @type parser
  key_name message
  reserve_data true
  <parse>
    @type grok
    grok_failure_key grokfailure
    <grok>
    pattern (?<message>[^\]]+ (?<timestamp>%{HOUR}:%{MINUTE}:%{SECOND}.%{NONNEGINT})\|\[(?<thread>[^\]]+)\]\|%{IPORHOST:pod_instance}\|(?<severity>([Aa]lert|ALERT|[Tt]race|TRACE|[Dd]ebug|DEBUG|[Nn]otice|NOTICE|[Ii]nfo?(?:rmation)?| INFO?(?:RMATION)?|[Ww]arn?(?:ing)?|WARN?(?:ING)?|[Ee]rr?(?:or)?|ERR?(?:OR)?|[Cc]rit?(?:ical)?|CRIT?(?:ICAL)?|[Ff]atal|FATAL|[Ss]evere|SEVERE|EMERG(?:ENCY)?|[Ee]merg(?:ency)?))\|%{GREEDYDATA:log_type}\|%{GREEDYDATA:application}\|%{GREEDYDATA:microservice}\|%{UUID:uuid}\|(?<message_type>[^\]]+)\|(?<fullmessage>(.|\r|\n)*))
    </grok>     
  </parse>
</filter>  

<filter ms-logs-application>
  @type record_transformer
  remove_keys fullmessage
  enable_ruby     
  <record>
    host.name ${hostname}
    remote_ip "#{(Socket.ip_address_list.detect do |intf| intf.ipv4_private? end).ip_address}"
    log.file.path "${record['tailed_path']}"
    #remote_ip "%#{@metadata.ip_address}"
  </record>
</filter>

<match ms-logs-application>
  @type rewrite_tag_filter
  num_threads 8
  <rule>
    key grokfailure
    pattern /.*/
    tag grokfailure_log_app
  </rule>
  <rule>
    key application
    pattern /.*/
    tag ms-logs-app-matched
  </rule>     
</match>

<match ms-logs-app-matched>
  @type elasticsearch_dynamic
  ---
</match>   

<match grokfailure_log_app>
  @type elasticsearch_dynamic
  ----
</match>     
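
Regarding the pos file: below is a minimal Ruby sketch (not part of the Fluentd config) for dumping what in_tail has recorded. It assumes the default pos-file layout of one tab-separated entry per line: path, offset in hex, inode in hex.

# Dump the entries in_tail keeps in the pos file and compare them with the files on disk.
# Assumption: default pos-file layout "<path>\t<offset hex>\t<inode hex>" per line.
pos_file = '/data/logs/app.log.pos'   # same pos_file as in the <source> block above

File.foreach(pos_file) do |line|
  path, offset_hex, inode_hex = line.chomp.split("\t")
  next unless path && offset_hex && inode_hex

  offset = offset_hex.to_i(16)
  inode  = inode_hex.to_i(16)
  status =
    if !File.exist?(path)
      'file missing'
    elsif File.stat(path).ino != inode
      'inode changed (rotated?)'
    elsif File.size(path) > offset
      'behind (unread bytes remain)'
    else
      'up to date'
    end
  puts format('%-70s offset=%-12d inode=%-12d %s', path, offset, inode, status)
end

Running this right after a new log file appears should show whether in_tail registered the file at all, and whether the recorded offset ever advances.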

0 Answers

No answers yet.